##// END OF EJS Templates
revlog: use a "radix" to address revlog...
marmoute -
r47921:8d3c2f9d default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,55 +1,60 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 # Dump revlogs as raw data stream
2 # Dump revlogs as raw data stream
3 # $ find .hg/store/ -name "*.i" | xargs dumprevlog > repo.dump
3 # $ find .hg/store/ -name "*.i" | xargs dumprevlog > repo.dump
4
4
5 from __future__ import absolute_import, print_function
5 from __future__ import absolute_import, print_function
6
6
7 import sys
7 import sys
8 from mercurial.node import hex
8 from mercurial.node import hex
9 from mercurial import (
9 from mercurial import (
10 encoding,
10 encoding,
11 pycompat,
11 pycompat,
12 revlog,
12 revlog,
13 )
13 )
14 from mercurial.utils import procutil
14 from mercurial.utils import procutil
15
15
16 from mercurial.revlogutils import (
16 from mercurial.revlogutils import (
17 constants as revlog_constants,
17 constants as revlog_constants,
18 )
18 )
19
19
# Put the three standard streams into binary mode; on Windows they
# default to text mode, which would corrupt the raw revlog dump.
for stream in (sys.stdin, sys.stdout, sys.stderr):
    procutil.setbinary(stream)
22
22
23
23
def binopen(path, mode=b'rb'):
    """Open *path* in binary mode, forcing a b'b' into *mode* if absent."""
    if b'b' not in mode:
        mode += b'b'
    return open(path, pycompat.sysstr(mode))


# revlog() probes its opener for an `options` attribute; none are needed here.
binopen.options = {}
31
31
32
32
def printb(data, end=b'\n'):
    """Write raw bytes (plus *end*) to stdout, flushing the text layer first
    so textual and binary output stay correctly interleaved."""
    sys.stdout.flush()
    procutil.stdout.write(data + end)
36
36
37
37
# Dump every revlog named on the command line as a raw data stream.
for f in sys.argv[1:]:
    localf = encoding.strtolocal(f)
    if not localf.endswith(b'.i'):
        # Only "*.i" revlog index files can be dumped.  Skip anything else:
        # without this `continue` we would derive a bogus radix from the
        # name and try to open a non-existent revlog.
        print("file:", f, file=sys.stderr)
        print("  invalid filename", file=sys.stderr)
        continue

    r = revlog.revlog(
        binopen,
        target=(revlog_constants.KIND_OTHER, b'dump-revlog'),
        # the radix is the index path with its ".i" suffix stripped
        radix=localf[:-2],
    )
    print("file:", f)
    for i in r:
        n = r.node(i)
        p = r.parents(n)
        d = r.revision(n)
        printb(b"node: %s" % hex(n))
        printb(b"linkrev: %d" % r.linkrev(i))
        printb(b"parents: %s %s" % (hex(p[0]), hex(p[1])))
        printb(b"length: %d" % len(d))
        printb(b"-start-")
        printb(d)
        printb(b"-end-")
@@ -1,3959 +1,3971 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69
69
70 import mercurial.revlog
70 import mercurial.revlog
71 from mercurial import (
71 from mercurial import (
72 changegroup,
72 changegroup,
73 cmdutil,
73 cmdutil,
74 commands,
74 commands,
75 copies,
75 copies,
76 error,
76 error,
77 extensions,
77 extensions,
78 hg,
78 hg,
79 mdiff,
79 mdiff,
80 merge,
80 merge,
81 util,
81 util,
82 )
82 )
83
83
84 # for "historical portability":
84 # for "historical portability":
85 # try to import modules separately (in dict order), and ignore
85 # try to import modules separately (in dict order), and ignore
86 # failure, because these aren't available with early Mercurial
86 # failure, because these aren't available with early Mercurial
87 try:
87 try:
88 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 from mercurial import branchmap # since 2.5 (or bcee63733aad)
89 except ImportError:
89 except ImportError:
90 pass
90 pass
91 try:
91 try:
92 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
93 except ImportError:
93 except ImportError:
94 pass
94 pass
95 try:
95 try:
96 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 from mercurial import registrar # since 3.7 (or 37d50250b696)
97
97
98 dir(registrar) # forcibly load it
98 dir(registrar) # forcibly load it
99 except ImportError:
99 except ImportError:
100 registrar = None
100 registrar = None
101 try:
101 try:
102 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
103 except ImportError:
103 except ImportError:
104 pass
104 pass
105 try:
105 try:
106 from mercurial.utils import repoviewutil # since 5.0
106 from mercurial.utils import repoviewutil # since 5.0
107 except ImportError:
107 except ImportError:
108 repoviewutil = None
108 repoviewutil = None
109 try:
109 try:
110 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
111 except ImportError:
111 except ImportError:
112 pass
112 pass
113 try:
113 try:
114 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
115 except ImportError:
115 except ImportError:
116 pass
116 pass
117
117
118 try:
118 try:
119 from mercurial import profiling
119 from mercurial import profiling
120 except ImportError:
120 except ImportError:
121 profiling = None
121 profiling = None
122
122
try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        # modern hg: revlog() requires a `target` tuple as second argument
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        # historical hg: no `target` concept, pass arguments straight through
        return mercurial.revlog.revlog(opener, *args, **kwargs)
137
137
138
138
def identity(a):
    """Return *a* unchanged (stand-in for missing pycompat helpers)."""
    return a
141
141
142
142
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    # sys.maxint is gone on py3; sys.maxsize is the documented replacement
    _maxint = sys.maxsize if pycompat.ispy3 else sys.maxint
except (NameError, ImportError, AttributeError):
    # Python 2 with a Mercurial too old to provide the pycompat helpers.
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
166
166
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # no pycompat at all: py2 stdlib module
        import Queue as queue
176
176
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    # older hg kept the helper on cmdutil; None when absent entirely
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
186
186
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinct from any real attribute value


def safehasattr(thing, attr):
    """Return True when *thing* has attribute *attr* (bytes or str name)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
198
198
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # NOTE: os.name is a native str; the original compared it to b'nt',
    # which is always False on Python 3, so time.clock was never selected
    # there.  'nt' compares equal on both Python 2 and Python 3.  (On py3
    # time.perf_counter always exists, so only py2 Windows reaches here.)
    util.timer = time.clock
else:
    util.timer = time.time
208
208
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil,
    "formatteropts",
    getattr(commands, "formatteropts", []),
)
217
217
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
_fallback_revlogopts = [
    (b'c', b'changelog', False, b'open changelog'),
    (b'm', b'manifest', False, b'open manifest'),
    (b'', b'dir', False, b'open directory manifest'),
]
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(commands, "debugrevlogopts", _fallback_revlogopts),
)
236
236
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into its name list."""
    return cmd.split(b"|")
244
244
245
245
if safehasattr(registrar, 'command'):
    # modern path: registrar.command, available since 3.7
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            entry = (func, list(options))
            if synopsis:
                entry = entry + (synopsis,)
            cmdtable[name] = entry
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
# (name, is-experimental) for every perf.* knob this extension declares;
# registration order matters and mirrors the historical declarations.
_PERF_CONFIG_ITEMS = [
    (b'presleep', True),
    (b'stub', True),
    (b'parentscount', True),
    (b'all-timing', True),
    (b'pre-run', False),
    (b'profile-benchmark', False),
    (b'run-limits', True),
]

try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    for _name, _experimental in _PERF_CONFIG_ITEMS:
        if _experimental:
            configitem(
                b'perf',
                _name,
                default=mercurial.configitems.dynamicdefault,
                experimental=True,
            )
        else:
            configitem(
                b'perf',
                _name,
                default=mercurial.configitems.dynamicdefault,
            )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # configitem() there predates the `experimental` keyword, so the very
    # first registration raised; re-register everything without it.
    for _name, _experimental in _PERF_CONFIG_ITEMS:
        configitem(
            b'perf',
            _name,
            default=mercurial.configitems.dynamicdefault,
        )
365
365
366
366
def getlen(ui):
    """Return a length function: a constant 1 in perf.stub mode, len otherwise."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
371
371
372
372
class noop(object):
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# shared do-nothing context instance
NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """Return a ``(timer, formatter)`` pair for a perf command.

    Gathers formatter creation in one place so each performance command
    does not have to duplicate it.
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all output to stderr unless the buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b"<seconds>-<minimum-run-count>"
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for entry in limitspec:
        pieces = entry.split(b'-', 1)
        if len(pieces) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % entry))
            continue
        try:
            time_limit = float(_sysstr(pieces[0]))
        except ValueError as e:
            ui.warn(
                (b'malformatted run limit entry, %s: %s\n' % (_bytestr(e), entry))
            )
            continue
        try:
            run_limit = int(_sysstr(pieces[1]))
        except ValueError as e:
            ui.warn(
                (b'malformatted run limit entry, %s: %s\n' % (_bytestr(e), entry))
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (perf.stub mode); no timing is collected."""
    if setup is not None:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one ``(wall, user, sys)`` sample on exit."""
    samples = []
    os_before = os.times()
    wall_before = util.timer()
    yield samples
    wall_after = util.timer()
    os_after = os.times()
    samples.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
526
526
527
527
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func*, collect timings, and report them through *fm*.

    Iteration stops as soon as any ``(elapsed, mincount)`` pair in *limits*
    is satisfied.  When *profiler* is given, only the first measured
    iteration runs under it.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations: executed but never measured
    for _unused in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as sample:
                r = func()
        profiler = NOOPCTX  # only the first measured run is profiled
        count += 1
        results.append(sample[0])
        # Look for a stop condition.
        elapsed = util.timer() - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
574
574
575
575
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark result through the formatter `fm`.

    `timings` is a list of (wall, user, sys) tuples.  Only the best run
    is shown unless `displayall` is set, in which case the max, average
    and median are reported as well.
    """

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" entries get a role-specific field-name prefix
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        avg = tuple(sum(column) / count for column in zip(*timings))
        display(b'avg', avg)
        display(b'median', timings[len(timings) // 2])
609
609
610
610
611 # utilities for historical portability
611 # utilities for historical portability
612
612
613
613
def getint(ui, section, name, default):
    """Read config value `section.name` as an int, or `default` if unset.

    Kept instead of ui.configint for "historical portability":
    ui.configint has only been available since 1.9 (or fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
626
626
627
627
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so it can be restored later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # small accessor object bound to `obj`/`name` via closure
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
664
664
665
665
666 # utilities to examine each internal API changes
666 # utilities to examine each internal API changes
667
667
668
668
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping wherever this Mercurial keeps it.

    For "historical portability", subsettable has lived in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both the
    # branchmap and repoview modules exist, but neither has subsettable)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
687
687
688
688
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability": repo.svfs has only been available
    # since 2.3 (or 7034365089bf); older versions expose 'sopener'
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    return getattr(repo, 'sopener')
698
698
699
699
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability": repo.vfs has only been available
    # since 2.3 (or 7034365089bf); older versions expose 'opener'
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    return getattr(repo, 'opener')
709
709
710
710
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) would be wrong here: existing
        # code paths expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been a filteredpropertycache since 2.5 (or
            # 98c867ac1330), so delattr() cannot be used on it
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches this
    # point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
739
739
740
740
741 # utilities to clear cache
741 # utilities to clear cache
742
742
743
743
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s filecache so it is recomputed on next use."""
    # filecache entries live on the unfiltered repository when one exists
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
751
751
752
752
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on the next access."""
    if repo is not repo.unfiltered():
        # a filtered repo keeps its own cached changelog reference;
        # bypass any property machinery when resetting it
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
758
758
759
759
760 # perf commands
760 # perf commands
761
761
762
762
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark walking the dirstate for the given patterns
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(d)
    fm.end()
776
776
777
777
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file `f` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def d():
        return len(fctx.annotate(True))

    timer(d)
    fm.end()
785
785
786
786
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)

        def status_dirstate():
            st = dirstate.status(
                matcher,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=unknown,
            )
            # force evaluation of every status category
            sum(map(bool, st))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=unknown))))
    fm.end()
823
823
824
824
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the old value BEFORE entering the try block: if the assignment
    # lived inside the try and raised, the finally clause would fail with
    # a NameError on `oldquiet`, masking the original error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer Mercurial passes a uipathfn argument to addremove
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
842
842
843
843
def clearcaches(cl):
    """Clear revlog lookup caches, across internal API versions."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # hg <= 5.2 kept a manual node->rev lookup cache on the revlog
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
854
854
855
855
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run from cold revlog caches
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=setup)
    fm.end()
871
871
872
872
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # benchmark computing the repository tag list, from a cleared tags
    # cache (and optionally cold changelog/manifest)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def bench():
        return len(repo.tags())

    timer(bench, setup=setup)
    fm.end()
897
897
898
898
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark iterating over all ancestors of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for _ in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
911
911
912
912
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET's revisions against the
    # lazy ancestor set of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(d)
    fm.end()
927
927
928
928
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # older Mercurial has no urlutil.get_unique_pull_path yet
        path = ui.expandpath(path)

    def s():
        # recreate the peer before every run so no discovery state is reused
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
950
950
951
951
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # force the bookmark store to be re-read from disk on access
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=setup)
    fm.end()
976
976
977
977
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time opening the bundle and running `fn` over it
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the bundle stream in `size`-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle layer at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part's payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1102
1102
1103
1103
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # drain the generator; producing the chunks is what we measure
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1139
1139
1140
1140
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call, rebuilding the dirs cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so parsing is not part of the measurement
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cached dirs map so the next run rebuilds it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1154
1154
1155
1155
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once before picking a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # measure a full walk over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # measure membership tests, including paths guaranteed to miss
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: measure a cold load up to the first "contains" answer

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1218
1218
1219
1219
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm everything except the cache under test
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the dirs cache so each run starts cold
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1235
1235
1236
1236
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate itself; only the foldmap is measured
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1256
1256
1257
1257
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate itself; only the foldmap is measured
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # the dirfoldmap is derived from the dirs cache, drop both
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1278
1278
1279
1279
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing writes
    b"a" in ds

    def setup():
        # force a real write on every run
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1296
1296
1297
1297
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1319
1319
1320
1320
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """measure runtime of `merge.calculateupdates`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1352
1352
1353
1353
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1376
1376
1377
1377
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1391
1391
1392
1392
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also pay the cost of re-reading the phaseroots file
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1417
1417
1418
1418
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # older changelog implementations expose nodemap instead of has_node
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1477
1477
1478
1478
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret REV as a changeset and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # compatibility with older Mercurial versions
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1522
1522
1523
1523
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1536
1536
1537
1537
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # start each run with a cold ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1554
1554
1555
1555
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1618
1618
1619
1619
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            # older Mercurial exposes the lookup through cl.nodemap
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1690
1690
1691
1691
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of the current hg executable"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: no /dev/null, use NUL and set HGRCPATH via environ
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1708
1708
1709
1709
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1735
1735
1736
1736
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    # len() forces the files list to be fully materialized
    timer(lambda: len(repo[rev].files()))
    fm.end()
1748
1748
1749
1749
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # field 3 of the parsed changelog entry is the list of touched files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1762
1762
1763
1763
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol through repo.lookup()."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        # len() makes sure the lookup result is fully realized
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
1770
1770
1771
1771
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a long, reproducible random edit sequence
    through a fresh linelog."""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit stream
    random.seed(0)
    currentlines = 0
    edit_args = []
    for rev in _xrange(edits):
        # pick a hunk [a1, a2) of the current content and replacement
        # bounds [b1, b2); keep the running line count in sync
        a1 = random.randint(0, currentlines)
        a2 = random.randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = random.randint(0, maxb1)
        b2 = random.randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        edit_args.append((rev, a1, a2, b1, b2))

    def replay():
        log = linelog.linelog()
        for args in edit_args:
            log.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(replay)
    fm.end()
1809
1809
1810
1810
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark parsing revset specs through scmutil.revrange."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(scmutil.revrange(repo, specs)))
    fm.end()
1818
1818
1819
1819
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """Benchmark mapping a node id back to its revision number."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # Newer Mercurial addresses a revlog by its "radix" (path without the
    # .i/.d extension); older versions take an explicit index file name.
    # Try the new keyword first and fall back on TypeError so this perf
    # command keeps working across both API generations.
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog's lookup caches so every run pays the full cost
        clearcaches(cl)

    timer(d)
    fm.end()
1837
1840
1838
1841
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output captured and discarded)."""
    opts = _byteskwargs(opts)
    revs = rev if rev is not None else []
    timer, fm = gettimer(ui, opts)
    # swallow the log output so terminal printing does not skew the timing
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=revs, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
1856
1859
1857
1860
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch pulls in changelog data, not just the index
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
1874
1877
1875
1878
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a copy of the ui whose output goes to the bit bucket
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    template = testedtemplate if testedtemplate is not None else defaulttemplate
    displayer = makelogtemplater(nullui, repo, template)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
1918
1921
1919
1922
def _displaystats(ui, opts, entries, data):
    """Print percentile statistics for each measured data series.

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each
    ``key`` to a list of tuples whose first item is the measured value
    (remaining items identify the sample). Emits one "### title" section
    per series with min/percentiles/max of the measured values.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # Percentiles must be computed over the number of samples in this
        # series, not the number of series: the previous ``len(data)``
        # collapsed every percentile onto the first few samples (and could
        # IndexError when a series was shorter than the series count).
        nbvalues = len(values)
        if not nbvalues:
            # nothing measured for this series; indexing values[0]/[-1]
            # below would crash, so skip it
            continue
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1964
1967
1965
1968
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format directive) pairs; the keys inside the format
    # directives must match the keys written into ``data`` below
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing/rename columns only exist when --timing ran the detection
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # revisions of interest: only merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2147
2150
2148
2151
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the extra rename/timing columns only exist when --timing is set
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # revisions of interest: only merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (ancestor-head, parent) pair independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace between this pair; skip it
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2287
2290
2288
2291
2289 @command(b'perf::cca|perfcca', formatteropts)
2292 @command(b'perf::cca|perfcca', formatteropts)
2290 def perfcca(ui, repo, **opts):
2293 def perfcca(ui, repo, **opts):
2291 opts = _byteskwargs(opts)
2294 opts = _byteskwargs(opts)
2292 timer, fm = gettimer(ui, opts)
2295 timer, fm = gettimer(ui, opts)
2293 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2296 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2294 fm.end()
2297 fm.end()
2295
2298
2296
2299
2297 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2300 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2298 def perffncacheload(ui, repo, **opts):
2301 def perffncacheload(ui, repo, **opts):
2299 opts = _byteskwargs(opts)
2302 opts = _byteskwargs(opts)
2300 timer, fm = gettimer(ui, opts)
2303 timer, fm = gettimer(ui, opts)
2301 s = repo.store
2304 s = repo.store
2302
2305
2303 def d():
2306 def d():
2304 s.fncache._load()
2307 s.fncache._load()
2305
2308
2306 timer(d)
2309 timer(d)
2307 fm.end()
2310 fm.end()
2308
2311
2309
2312
2310 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2313 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2311 def perffncachewrite(ui, repo, **opts):
2314 def perffncachewrite(ui, repo, **opts):
2312 opts = _byteskwargs(opts)
2315 opts = _byteskwargs(opts)
2313 timer, fm = gettimer(ui, opts)
2316 timer, fm = gettimer(ui, opts)
2314 s = repo.store
2317 s = repo.store
2315 lock = repo.lock()
2318 lock = repo.lock()
2316 s.fncache._load()
2319 s.fncache._load()
2317 tr = repo.transaction(b'perffncachewrite')
2320 tr = repo.transaction(b'perffncachewrite')
2318 tr.addbackup(b'fncache')
2321 tr.addbackup(b'fncache')
2319
2322
2320 def d():
2323 def d():
2321 s.fncache._dirty = True
2324 s.fncache._dirty = True
2322 s.fncache.write(tr)
2325 s.fncache.write(tr)
2323
2326
2324 timer(d)
2327 timer(d)
2325 tr.close()
2328 tr.close()
2326 lock.release()
2329 lock.release()
2327 fm.end()
2330 fm.end()
2328
2331
2329
2332
2330 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2333 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2331 def perffncacheencode(ui, repo, **opts):
2334 def perffncacheencode(ui, repo, **opts):
2332 opts = _byteskwargs(opts)
2335 opts = _byteskwargs(opts)
2333 timer, fm = gettimer(ui, opts)
2336 timer, fm = gettimer(ui, opts)
2334 s = repo.store
2337 s = repo.store
2335 s.fncache._load()
2338 s.fncache._load()
2336
2339
2337 def d():
2340 def d():
2338 for p in s.fncache.entries:
2341 for p in s.fncache.entries:
2339 s.encode(p)
2342 s.encode(p)
2340
2343
2341 timer(d)
2344 timer(d)
2342 fm.end()
2345 fm.end()
2343
2346
2344
2347
2345 def _bdiffworker(q, blocks, xdiff, ready, done):
2348 def _bdiffworker(q, blocks, xdiff, ready, done):
2346 while not done.is_set():
2349 while not done.is_set():
2347 pair = q.get()
2350 pair = q.get()
2348 while pair is not None:
2351 while pair is not None:
2349 if xdiff:
2352 if xdiff:
2350 mdiff.bdiff.xdiffblocks(*pair)
2353 mdiff.bdiff.xdiffblocks(*pair)
2351 elif blocks:
2354 elif blocks:
2352 mdiff.bdiff.blocks(*pair)
2355 mdiff.bdiff.blocks(*pair)
2353 else:
2356 else:
2354 mdiff.textdiff(*pair)
2357 mdiff.textdiff(*pair)
2355 q.task_done()
2358 q.task_done()
2356 pair = q.get()
2359 pair = q.get()
2357 q.task_done() # for the None one
2360 q.task_done() # for the None one
2358 with ready:
2361 with ready:
2359 ready.wait()
2362 ready.wait()
2360
2363
2361
2364
2362 def _manifestrevision(repo, mnode):
2365 def _manifestrevision(repo, mnode):
2363 ml = repo.manifestlog
2366 ml = repo.manifestlog
2364
2367
2365 if util.safehasattr(ml, b'getstorage'):
2368 if util.safehasattr(ml, b'getstorage'):
2366 store = ml.getstorage(b'')
2369 store = ml.getstorage(b'')
2367 else:
2370 else:
2368 store = ml._revlog
2371 store = ml._revlog
2369
2372
2370 return store.revision(mnode)
2373 return store.revision(mnode)
2371
2374
2372
2375
2373 @command(
2376 @command(
2374 b'perf::bdiff|perfbdiff',
2377 b'perf::bdiff|perfbdiff',
2375 revlogopts
2378 revlogopts
2376 + formatteropts
2379 + formatteropts
2377 + [
2380 + [
2378 (
2381 (
2379 b'',
2382 b'',
2380 b'count',
2383 b'count',
2381 1,
2384 1,
2382 b'number of revisions to test (when using --startrev)',
2385 b'number of revisions to test (when using --startrev)',
2383 ),
2386 ),
2384 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2387 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2385 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2388 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2386 (b'', b'blocks', False, b'test computing diffs into blocks'),
2389 (b'', b'blocks', False, b'test computing diffs into blocks'),
2387 (b'', b'xdiff', False, b'use xdiff algorithm'),
2390 (b'', b'xdiff', False, b'use xdiff algorithm'),
2388 ],
2391 ],
2389 b'-c|-m|FILE REV',
2392 b'-c|-m|FILE REV',
2390 )
2393 )
2391 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2394 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2392 """benchmark a bdiff between revisions
2395 """benchmark a bdiff between revisions
2393
2396
2394 By default, benchmark a bdiff between its delta parent and itself.
2397 By default, benchmark a bdiff between its delta parent and itself.
2395
2398
2396 With ``--count``, benchmark bdiffs between delta parents and self for N
2399 With ``--count``, benchmark bdiffs between delta parents and self for N
2397 revisions starting at the specified revision.
2400 revisions starting at the specified revision.
2398
2401
2399 With ``--alldata``, assume the requested revision is a changeset and
2402 With ``--alldata``, assume the requested revision is a changeset and
2400 measure bdiffs for all changes related to that changeset (manifest
2403 measure bdiffs for all changes related to that changeset (manifest
2401 and filelogs).
2404 and filelogs).
2402 """
2405 """
2403 opts = _byteskwargs(opts)
2406 opts = _byteskwargs(opts)
2404
2407
2405 if opts[b'xdiff'] and not opts[b'blocks']:
2408 if opts[b'xdiff'] and not opts[b'blocks']:
2406 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2409 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2407
2410
2408 if opts[b'alldata']:
2411 if opts[b'alldata']:
2409 opts[b'changelog'] = True
2412 opts[b'changelog'] = True
2410
2413
2411 if opts.get(b'changelog') or opts.get(b'manifest'):
2414 if opts.get(b'changelog') or opts.get(b'manifest'):
2412 file_, rev = None, file_
2415 file_, rev = None, file_
2413 elif rev is None:
2416 elif rev is None:
2414 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2417 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2415
2418
2416 blocks = opts[b'blocks']
2419 blocks = opts[b'blocks']
2417 xdiff = opts[b'xdiff']
2420 xdiff = opts[b'xdiff']
2418 textpairs = []
2421 textpairs = []
2419
2422
2420 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2423 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2421
2424
2422 startrev = r.rev(r.lookup(rev))
2425 startrev = r.rev(r.lookup(rev))
2423 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2426 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2424 if opts[b'alldata']:
2427 if opts[b'alldata']:
2425 # Load revisions associated with changeset.
2428 # Load revisions associated with changeset.
2426 ctx = repo[rev]
2429 ctx = repo[rev]
2427 mtext = _manifestrevision(repo, ctx.manifestnode())
2430 mtext = _manifestrevision(repo, ctx.manifestnode())
2428 for pctx in ctx.parents():
2431 for pctx in ctx.parents():
2429 pman = _manifestrevision(repo, pctx.manifestnode())
2432 pman = _manifestrevision(repo, pctx.manifestnode())
2430 textpairs.append((pman, mtext))
2433 textpairs.append((pman, mtext))
2431
2434
2432 # Load filelog revisions by iterating manifest delta.
2435 # Load filelog revisions by iterating manifest delta.
2433 man = ctx.manifest()
2436 man = ctx.manifest()
2434 pman = ctx.p1().manifest()
2437 pman = ctx.p1().manifest()
2435 for filename, change in pman.diff(man).items():
2438 for filename, change in pman.diff(man).items():
2436 fctx = repo.file(filename)
2439 fctx = repo.file(filename)
2437 f1 = fctx.revision(change[0][0] or -1)
2440 f1 = fctx.revision(change[0][0] or -1)
2438 f2 = fctx.revision(change[1][0] or -1)
2441 f2 = fctx.revision(change[1][0] or -1)
2439 textpairs.append((f1, f2))
2442 textpairs.append((f1, f2))
2440 else:
2443 else:
2441 dp = r.deltaparent(rev)
2444 dp = r.deltaparent(rev)
2442 textpairs.append((r.revision(dp), r.revision(rev)))
2445 textpairs.append((r.revision(dp), r.revision(rev)))
2443
2446
2444 withthreads = threads > 0
2447 withthreads = threads > 0
2445 if not withthreads:
2448 if not withthreads:
2446
2449
2447 def d():
2450 def d():
2448 for pair in textpairs:
2451 for pair in textpairs:
2449 if xdiff:
2452 if xdiff:
2450 mdiff.bdiff.xdiffblocks(*pair)
2453 mdiff.bdiff.xdiffblocks(*pair)
2451 elif blocks:
2454 elif blocks:
2452 mdiff.bdiff.blocks(*pair)
2455 mdiff.bdiff.blocks(*pair)
2453 else:
2456 else:
2454 mdiff.textdiff(*pair)
2457 mdiff.textdiff(*pair)
2455
2458
2456 else:
2459 else:
2457 q = queue()
2460 q = queue()
2458 for i in _xrange(threads):
2461 for i in _xrange(threads):
2459 q.put(None)
2462 q.put(None)
2460 ready = threading.Condition()
2463 ready = threading.Condition()
2461 done = threading.Event()
2464 done = threading.Event()
2462 for i in _xrange(threads):
2465 for i in _xrange(threads):
2463 threading.Thread(
2466 threading.Thread(
2464 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2467 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2465 ).start()
2468 ).start()
2466 q.join()
2469 q.join()
2467
2470
2468 def d():
2471 def d():
2469 for pair in textpairs:
2472 for pair in textpairs:
2470 q.put(pair)
2473 q.put(pair)
2471 for i in _xrange(threads):
2474 for i in _xrange(threads):
2472 q.put(None)
2475 q.put(None)
2473 with ready:
2476 with ready:
2474 ready.notify_all()
2477 ready.notify_all()
2475 q.join()
2478 q.join()
2476
2479
2477 timer, fm = gettimer(ui, opts)
2480 timer, fm = gettimer(ui, opts)
2478 timer(d)
2481 timer(d)
2479 fm.end()
2482 fm.end()
2480
2483
2481 if withthreads:
2484 if withthreads:
2482 done.set()
2485 done.set()
2483 for i in _xrange(threads):
2486 for i in _xrange(threads):
2484 q.put(None)
2487 q.put(None)
2485 with ready:
2488 with ready:
2486 ready.notify_all()
2489 ready.notify_all()
2487
2490
2488
2491
2489 @command(
2492 @command(
2490 b'perf::unidiff|perfunidiff',
2493 b'perf::unidiff|perfunidiff',
2491 revlogopts
2494 revlogopts
2492 + formatteropts
2495 + formatteropts
2493 + [
2496 + [
2494 (
2497 (
2495 b'',
2498 b'',
2496 b'count',
2499 b'count',
2497 1,
2500 1,
2498 b'number of revisions to test (when using --startrev)',
2501 b'number of revisions to test (when using --startrev)',
2499 ),
2502 ),
2500 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2503 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2501 ],
2504 ],
2502 b'-c|-m|FILE REV',
2505 b'-c|-m|FILE REV',
2503 )
2506 )
2504 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2507 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2505 """benchmark a unified diff between revisions
2508 """benchmark a unified diff between revisions
2506
2509
2507 This doesn't include any copy tracing - it's just a unified diff
2510 This doesn't include any copy tracing - it's just a unified diff
2508 of the texts.
2511 of the texts.
2509
2512
2510 By default, benchmark a diff between its delta parent and itself.
2513 By default, benchmark a diff between its delta parent and itself.
2511
2514
2512 With ``--count``, benchmark diffs between delta parents and self for N
2515 With ``--count``, benchmark diffs between delta parents and self for N
2513 revisions starting at the specified revision.
2516 revisions starting at the specified revision.
2514
2517
2515 With ``--alldata``, assume the requested revision is a changeset and
2518 With ``--alldata``, assume the requested revision is a changeset and
2516 measure diffs for all changes related to that changeset (manifest
2519 measure diffs for all changes related to that changeset (manifest
2517 and filelogs).
2520 and filelogs).
2518 """
2521 """
2519 opts = _byteskwargs(opts)
2522 opts = _byteskwargs(opts)
2520 if opts[b'alldata']:
2523 if opts[b'alldata']:
2521 opts[b'changelog'] = True
2524 opts[b'changelog'] = True
2522
2525
2523 if opts.get(b'changelog') or opts.get(b'manifest'):
2526 if opts.get(b'changelog') or opts.get(b'manifest'):
2524 file_, rev = None, file_
2527 file_, rev = None, file_
2525 elif rev is None:
2528 elif rev is None:
2526 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2529 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2527
2530
2528 textpairs = []
2531 textpairs = []
2529
2532
2530 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2533 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2531
2534
2532 startrev = r.rev(r.lookup(rev))
2535 startrev = r.rev(r.lookup(rev))
2533 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2536 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2534 if opts[b'alldata']:
2537 if opts[b'alldata']:
2535 # Load revisions associated with changeset.
2538 # Load revisions associated with changeset.
2536 ctx = repo[rev]
2539 ctx = repo[rev]
2537 mtext = _manifestrevision(repo, ctx.manifestnode())
2540 mtext = _manifestrevision(repo, ctx.manifestnode())
2538 for pctx in ctx.parents():
2541 for pctx in ctx.parents():
2539 pman = _manifestrevision(repo, pctx.manifestnode())
2542 pman = _manifestrevision(repo, pctx.manifestnode())
2540 textpairs.append((pman, mtext))
2543 textpairs.append((pman, mtext))
2541
2544
2542 # Load filelog revisions by iterating manifest delta.
2545 # Load filelog revisions by iterating manifest delta.
2543 man = ctx.manifest()
2546 man = ctx.manifest()
2544 pman = ctx.p1().manifest()
2547 pman = ctx.p1().manifest()
2545 for filename, change in pman.diff(man).items():
2548 for filename, change in pman.diff(man).items():
2546 fctx = repo.file(filename)
2549 fctx = repo.file(filename)
2547 f1 = fctx.revision(change[0][0] or -1)
2550 f1 = fctx.revision(change[0][0] or -1)
2548 f2 = fctx.revision(change[1][0] or -1)
2551 f2 = fctx.revision(change[1][0] or -1)
2549 textpairs.append((f1, f2))
2552 textpairs.append((f1, f2))
2550 else:
2553 else:
2551 dp = r.deltaparent(rev)
2554 dp = r.deltaparent(rev)
2552 textpairs.append((r.revision(dp), r.revision(rev)))
2555 textpairs.append((r.revision(dp), r.revision(rev)))
2553
2556
2554 def d():
2557 def d():
2555 for left, right in textpairs:
2558 for left, right in textpairs:
2556 # The date strings don't matter, so we pass empty strings.
2559 # The date strings don't matter, so we pass empty strings.
2557 headerlines, hunks = mdiff.unidiff(
2560 headerlines, hunks = mdiff.unidiff(
2558 left, b'', right, b'', b'left', b'right', binary=False
2561 left, b'', right, b'', b'left', b'right', binary=False
2559 )
2562 )
2560 # consume iterators in roughly the way patch.py does
2563 # consume iterators in roughly the way patch.py does
2561 b'\n'.join(headerlines)
2564 b'\n'.join(headerlines)
2562 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2565 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2563
2566
2564 timer, fm = gettimer(ui, opts)
2567 timer, fm = gettimer(ui, opts)
2565 timer(d)
2568 timer(d)
2566 fm.end()
2569 fm.end()
2567
2570
2568
2571
2569 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2572 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2570 def perfdiffwd(ui, repo, **opts):
2573 def perfdiffwd(ui, repo, **opts):
2571 """Profile diff of working directory changes"""
2574 """Profile diff of working directory changes"""
2572 opts = _byteskwargs(opts)
2575 opts = _byteskwargs(opts)
2573 timer, fm = gettimer(ui, opts)
2576 timer, fm = gettimer(ui, opts)
2574 options = {
2577 options = {
2575 'w': 'ignore_all_space',
2578 'w': 'ignore_all_space',
2576 'b': 'ignore_space_change',
2579 'b': 'ignore_space_change',
2577 'B': 'ignore_blank_lines',
2580 'B': 'ignore_blank_lines',
2578 }
2581 }
2579
2582
2580 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2583 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2581 opts = {options[c]: b'1' for c in diffopt}
2584 opts = {options[c]: b'1' for c in diffopt}
2582
2585
2583 def d():
2586 def d():
2584 ui.pushbuffer()
2587 ui.pushbuffer()
2585 commands.diff(ui, repo, **opts)
2588 commands.diff(ui, repo, **opts)
2586 ui.popbuffer()
2589 ui.popbuffer()
2587
2590
2588 diffopt = diffopt.encode('ascii')
2591 diffopt = diffopt.encode('ascii')
2589 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2592 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2590 timer(d, title=title)
2593 timer(d, title=title)
2591 fm.end()
2594 fm.end()
2592
2595
2593
2596
2594 @command(
2597 @command(
2595 b'perf::revlogindex|perfrevlogindex',
2598 b'perf::revlogindex|perfrevlogindex',
2596 revlogopts + formatteropts,
2599 revlogopts + formatteropts,
2597 b'-c|-m|FILE',
2600 b'-c|-m|FILE',
2598 )
2601 )
2599 def perfrevlogindex(ui, repo, file_=None, **opts):
2602 def perfrevlogindex(ui, repo, file_=None, **opts):
2600 """Benchmark operations against a revlog index.
2603 """Benchmark operations against a revlog index.
2601
2604
2602 This tests constructing a revlog instance, reading index data,
2605 This tests constructing a revlog instance, reading index data,
2603 parsing index data, and performing various operations related to
2606 parsing index data, and performing various operations related to
2604 index data.
2607 index data.
2605 """
2608 """
2606
2609
2607 opts = _byteskwargs(opts)
2610 opts = _byteskwargs(opts)
2608
2611
2609 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2612 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2610
2613
2611 opener = getattr(rl, 'opener') # trick linter
2614 opener = getattr(rl, 'opener') # trick linter
2612 # compat with hg <= 5.8
2615 # compat with hg <= 5.8
2616 radix = getattr(rl, 'radix', None)
2613 indexfile = getattr(rl, '_indexfile', None)
2617 indexfile = getattr(rl, '_indexfile', None)
2614 if indexfile is None:
2618 if indexfile is None:
2615 # compatibility with <= hg-5.8
2619 # compatibility with <= hg-5.8
2616 indexfile = getattr(rl, 'indexfile')
2620 indexfile = getattr(rl, 'indexfile')
2617 data = opener.read(indexfile)
2621 data = opener.read(indexfile)
2618
2622
2619 header = struct.unpack(b'>I', data[0:4])[0]
2623 header = struct.unpack(b'>I', data[0:4])[0]
2620 version = header & 0xFFFF
2624 version = header & 0xFFFF
2621 if version == 1:
2625 if version == 1:
2622 inline = header & (1 << 16)
2626 inline = header & (1 << 16)
2623 else:
2627 else:
2624 raise error.Abort(b'unsupported revlog version: %d' % version)
2628 raise error.Abort(b'unsupported revlog version: %d' % version)
2625
2629
2626 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2630 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2627 if parse_index_v1 is None:
2631 if parse_index_v1 is None:
2628 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2632 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2629
2633
2630 rllen = len(rl)
2634 rllen = len(rl)
2631
2635
2632 node0 = rl.node(0)
2636 node0 = rl.node(0)
2633 node25 = rl.node(rllen // 4)
2637 node25 = rl.node(rllen // 4)
2634 node50 = rl.node(rllen // 2)
2638 node50 = rl.node(rllen // 2)
2635 node75 = rl.node(rllen // 4 * 3)
2639 node75 = rl.node(rllen // 4 * 3)
2636 node100 = rl.node(rllen - 1)
2640 node100 = rl.node(rllen - 1)
2637
2641
2638 allrevs = range(rllen)
2642 allrevs = range(rllen)
2639 allrevsrev = list(reversed(allrevs))
2643 allrevsrev = list(reversed(allrevs))
2640 allnodes = [rl.node(rev) for rev in range(rllen)]
2644 allnodes = [rl.node(rev) for rev in range(rllen)]
2641 allnodesrev = list(reversed(allnodes))
2645 allnodesrev = list(reversed(allnodes))
2642
2646
2643 def constructor():
2647 def constructor():
2648 if radix is not None:
2649 revlog(opener, radix=radix)
2650 else:
2651 # hg <= 5.8
2644 revlog(opener, indexfile=indexfile)
2652 revlog(opener, indexfile=indexfile)
2645
2653
2646 def read():
2654 def read():
2647 with opener(indexfile) as fh:
2655 with opener(indexfile) as fh:
2648 fh.read()
2656 fh.read()
2649
2657
2650 def parseindex():
2658 def parseindex():
2651 parse_index_v1(data, inline)
2659 parse_index_v1(data, inline)
2652
2660
2653 def getentry(revornode):
2661 def getentry(revornode):
2654 index = parse_index_v1(data, inline)[0]
2662 index = parse_index_v1(data, inline)[0]
2655 index[revornode]
2663 index[revornode]
2656
2664
2657 def getentries(revs, count=1):
2665 def getentries(revs, count=1):
2658 index = parse_index_v1(data, inline)[0]
2666 index = parse_index_v1(data, inline)[0]
2659
2667
2660 for i in range(count):
2668 for i in range(count):
2661 for rev in revs:
2669 for rev in revs:
2662 index[rev]
2670 index[rev]
2663
2671
2664 def resolvenode(node):
2672 def resolvenode(node):
2665 index = parse_index_v1(data, inline)[0]
2673 index = parse_index_v1(data, inline)[0]
2666 rev = getattr(index, 'rev', None)
2674 rev = getattr(index, 'rev', None)
2667 if rev is None:
2675 if rev is None:
2668 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2676 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2669 # This only works for the C code.
2677 # This only works for the C code.
2670 if nodemap is None:
2678 if nodemap is None:
2671 return
2679 return
2672 rev = nodemap.__getitem__
2680 rev = nodemap.__getitem__
2673
2681
2674 try:
2682 try:
2675 rev(node)
2683 rev(node)
2676 except error.RevlogError:
2684 except error.RevlogError:
2677 pass
2685 pass
2678
2686
2679 def resolvenodes(nodes, count=1):
2687 def resolvenodes(nodes, count=1):
2680 index = parse_index_v1(data, inline)[0]
2688 index = parse_index_v1(data, inline)[0]
2681 rev = getattr(index, 'rev', None)
2689 rev = getattr(index, 'rev', None)
2682 if rev is None:
2690 if rev is None:
2683 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2691 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2684 # This only works for the C code.
2692 # This only works for the C code.
2685 if nodemap is None:
2693 if nodemap is None:
2686 return
2694 return
2687 rev = nodemap.__getitem__
2695 rev = nodemap.__getitem__
2688
2696
2689 for i in range(count):
2697 for i in range(count):
2690 for node in nodes:
2698 for node in nodes:
2691 try:
2699 try:
2692 rev(node)
2700 rev(node)
2693 except error.RevlogError:
2701 except error.RevlogError:
2694 pass
2702 pass
2695
2703
2696 benches = [
2704 benches = [
2697 (constructor, b'revlog constructor'),
2705 (constructor, b'revlog constructor'),
2698 (read, b'read'),
2706 (read, b'read'),
2699 (parseindex, b'create index object'),
2707 (parseindex, b'create index object'),
2700 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2708 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2701 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2709 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2702 (lambda: resolvenode(node0), b'look up node at rev 0'),
2710 (lambda: resolvenode(node0), b'look up node at rev 0'),
2703 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2711 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2704 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2712 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2705 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2713 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2706 (lambda: resolvenode(node100), b'look up node at tip'),
2714 (lambda: resolvenode(node100), b'look up node at tip'),
2707 # 2x variation is to measure caching impact.
2715 # 2x variation is to measure caching impact.
2708 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2716 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2709 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2717 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2710 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2718 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2711 (
2719 (
2712 lambda: resolvenodes(allnodesrev, 2),
2720 lambda: resolvenodes(allnodesrev, 2),
2713 b'look up all nodes 2x (reverse)',
2721 b'look up all nodes 2x (reverse)',
2714 ),
2722 ),
2715 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2723 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2716 (
2724 (
2717 lambda: getentries(allrevs, 2),
2725 lambda: getentries(allrevs, 2),
2718 b'retrieve all index entries 2x (forward)',
2726 b'retrieve all index entries 2x (forward)',
2719 ),
2727 ),
2720 (
2728 (
2721 lambda: getentries(allrevsrev),
2729 lambda: getentries(allrevsrev),
2722 b'retrieve all index entries (reverse)',
2730 b'retrieve all index entries (reverse)',
2723 ),
2731 ),
2724 (
2732 (
2725 lambda: getentries(allrevsrev, 2),
2733 lambda: getentries(allrevsrev, 2),
2726 b'retrieve all index entries 2x (reverse)',
2734 b'retrieve all index entries 2x (reverse)',
2727 ),
2735 ),
2728 ]
2736 ]
2729
2737
2730 for fn, title in benches:
2738 for fn, title in benches:
2731 timer, fm = gettimer(ui, opts)
2739 timer, fm = gettimer(ui, opts)
2732 timer(fn, title=title)
2740 timer(fn, title=title)
2733 fm.end()
2741 fm.end()
2734
2742
2735
2743
2736 @command(
2744 @command(
2737 b'perf::revlogrevisions|perfrevlogrevisions',
2745 b'perf::revlogrevisions|perfrevlogrevisions',
2738 revlogopts
2746 revlogopts
2739 + formatteropts
2747 + formatteropts
2740 + [
2748 + [
2741 (b'd', b'dist', 100, b'distance between the revisions'),
2749 (b'd', b'dist', 100, b'distance between the revisions'),
2742 (b's', b'startrev', 0, b'revision to start reading at'),
2750 (b's', b'startrev', 0, b'revision to start reading at'),
2743 (b'', b'reverse', False, b'read in reverse'),
2751 (b'', b'reverse', False, b'read in reverse'),
2744 ],
2752 ],
2745 b'-c|-m|FILE',
2753 b'-c|-m|FILE',
2746 )
2754 )
2747 def perfrevlogrevisions(
2755 def perfrevlogrevisions(
2748 ui, repo, file_=None, startrev=0, reverse=False, **opts
2756 ui, repo, file_=None, startrev=0, reverse=False, **opts
2749 ):
2757 ):
2750 """Benchmark reading a series of revisions from a revlog.
2758 """Benchmark reading a series of revisions from a revlog.
2751
2759
2752 By default, we read every ``-d/--dist`` revision from 0 to tip of
2760 By default, we read every ``-d/--dist`` revision from 0 to tip of
2753 the specified revlog.
2761 the specified revlog.
2754
2762
2755 The start revision can be defined via ``-s/--startrev``.
2763 The start revision can be defined via ``-s/--startrev``.
2756 """
2764 """
2757 opts = _byteskwargs(opts)
2765 opts = _byteskwargs(opts)
2758
2766
2759 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2767 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2760 rllen = getlen(ui)(rl)
2768 rllen = getlen(ui)(rl)
2761
2769
2762 if startrev < 0:
2770 if startrev < 0:
2763 startrev = rllen + startrev
2771 startrev = rllen + startrev
2764
2772
2765 def d():
2773 def d():
2766 rl.clearcaches()
2774 rl.clearcaches()
2767
2775
2768 beginrev = startrev
2776 beginrev = startrev
2769 endrev = rllen
2777 endrev = rllen
2770 dist = opts[b'dist']
2778 dist = opts[b'dist']
2771
2779
2772 if reverse:
2780 if reverse:
2773 beginrev, endrev = endrev - 1, beginrev - 1
2781 beginrev, endrev = endrev - 1, beginrev - 1
2774 dist = -1 * dist
2782 dist = -1 * dist
2775
2783
2776 for x in _xrange(beginrev, endrev, dist):
2784 for x in _xrange(beginrev, endrev, dist):
2777 # Old revisions don't support passing int.
2785 # Old revisions don't support passing int.
2778 n = rl.node(x)
2786 n = rl.node(x)
2779 rl.revision(n)
2787 rl.revision(n)
2780
2788
2781 timer, fm = gettimer(ui, opts)
2789 timer, fm = gettimer(ui, opts)
2782 timer(d)
2790 timer(d)
2783 fm.end()
2791 fm.end()
2784
2792
2785
2793
2786 @command(
2794 @command(
2787 b'perf::revlogwrite|perfrevlogwrite',
2795 b'perf::revlogwrite|perfrevlogwrite',
2788 revlogopts
2796 revlogopts
2789 + formatteropts
2797 + formatteropts
2790 + [
2798 + [
2791 (b's', b'startrev', 1000, b'revision to start writing at'),
2799 (b's', b'startrev', 1000, b'revision to start writing at'),
2792 (b'', b'stoprev', -1, b'last revision to write'),
2800 (b'', b'stoprev', -1, b'last revision to write'),
2793 (b'', b'count', 3, b'number of passes to perform'),
2801 (b'', b'count', 3, b'number of passes to perform'),
2794 (b'', b'details', False, b'print timing for every revisions tested'),
2802 (b'', b'details', False, b'print timing for every revisions tested'),
2795 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2803 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2796 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2804 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2797 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2805 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2798 ],
2806 ],
2799 b'-c|-m|FILE',
2807 b'-c|-m|FILE',
2800 )
2808 )
2801 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2809 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2802 """Benchmark writing a series of revisions to a revlog.
2810 """Benchmark writing a series of revisions to a revlog.
2803
2811
2804 Possible source values are:
2812 Possible source values are:
2805 * `full`: add from a full text (default).
2813 * `full`: add from a full text (default).
2806 * `parent-1`: add from a delta to the first parent
2814 * `parent-1`: add from a delta to the first parent
2807 * `parent-2`: add from a delta to the second parent if it exists
2815 * `parent-2`: add from a delta to the second parent if it exists
2808 (use a delta from the first parent otherwise)
2816 (use a delta from the first parent otherwise)
2809 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2817 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2810 * `storage`: add from the existing precomputed deltas
2818 * `storage`: add from the existing precomputed deltas
2811
2819
2812 Note: This performance command measures performance in a custom way. As a
2820 Note: This performance command measures performance in a custom way. As a
2813 result some of the global configuration of the 'perf' command does not
2821 result some of the global configuration of the 'perf' command does not
2814 apply to it:
2822 apply to it:
2815
2823
2816 * ``pre-run``: disabled
2824 * ``pre-run``: disabled
2817
2825
2818 * ``profile-benchmark``: disabled
2826 * ``profile-benchmark``: disabled
2819
2827
2820 * ``run-limits``: disabled use --count instead
2828 * ``run-limits``: disabled use --count instead
2821 """
2829 """
2822 opts = _byteskwargs(opts)
2830 opts = _byteskwargs(opts)
2823
2831
2824 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2832 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2825 rllen = getlen(ui)(rl)
2833 rllen = getlen(ui)(rl)
2826 if startrev < 0:
2834 if startrev < 0:
2827 startrev = rllen + startrev
2835 startrev = rllen + startrev
2828 if stoprev < 0:
2836 if stoprev < 0:
2829 stoprev = rllen + stoprev
2837 stoprev = rllen + stoprev
2830
2838
2831 lazydeltabase = opts['lazydeltabase']
2839 lazydeltabase = opts['lazydeltabase']
2832 source = opts['source']
2840 source = opts['source']
2833 clearcaches = opts['clear_caches']
2841 clearcaches = opts['clear_caches']
2834 validsource = (
2842 validsource = (
2835 b'full',
2843 b'full',
2836 b'parent-1',
2844 b'parent-1',
2837 b'parent-2',
2845 b'parent-2',
2838 b'parent-smallest',
2846 b'parent-smallest',
2839 b'storage',
2847 b'storage',
2840 )
2848 )
2841 if source not in validsource:
2849 if source not in validsource:
2842 raise error.Abort('invalid source type: %s' % source)
2850 raise error.Abort('invalid source type: %s' % source)
2843
2851
2844 ### actually gather results
2852 ### actually gather results
2845 count = opts['count']
2853 count = opts['count']
2846 if count <= 0:
2854 if count <= 0:
2847 raise error.Abort('invalide run count: %d' % count)
2855 raise error.Abort('invalide run count: %d' % count)
2848 allresults = []
2856 allresults = []
2849 for c in range(count):
2857 for c in range(count):
2850 timing = _timeonewrite(
2858 timing = _timeonewrite(
2851 ui,
2859 ui,
2852 rl,
2860 rl,
2853 source,
2861 source,
2854 startrev,
2862 startrev,
2855 stoprev,
2863 stoprev,
2856 c + 1,
2864 c + 1,
2857 lazydeltabase=lazydeltabase,
2865 lazydeltabase=lazydeltabase,
2858 clearcaches=clearcaches,
2866 clearcaches=clearcaches,
2859 )
2867 )
2860 allresults.append(timing)
2868 allresults.append(timing)
2861
2869
2862 ### consolidate the results in a single list
2870 ### consolidate the results in a single list
2863 results = []
2871 results = []
2864 for idx, (rev, t) in enumerate(allresults[0]):
2872 for idx, (rev, t) in enumerate(allresults[0]):
2865 ts = [t]
2873 ts = [t]
2866 for other in allresults[1:]:
2874 for other in allresults[1:]:
2867 orev, ot = other[idx]
2875 orev, ot = other[idx]
2868 assert orev == rev
2876 assert orev == rev
2869 ts.append(ot)
2877 ts.append(ot)
2870 results.append((rev, ts))
2878 results.append((rev, ts))
2871 resultcount = len(results)
2879 resultcount = len(results)
2872
2880
2873 ### Compute and display relevant statistics
2881 ### Compute and display relevant statistics
2874
2882
2875 # get a formatter
2883 # get a formatter
2876 fm = ui.formatter(b'perf', opts)
2884 fm = ui.formatter(b'perf', opts)
2877 displayall = ui.configbool(b"perf", b"all-timing", False)
2885 displayall = ui.configbool(b"perf", b"all-timing", False)
2878
2886
2879 # print individual details if requested
2887 # print individual details if requested
2880 if opts['details']:
2888 if opts['details']:
2881 for idx, item in enumerate(results, 1):
2889 for idx, item in enumerate(results, 1):
2882 rev, data = item
2890 rev, data = item
2883 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2891 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2884 formatone(fm, data, title=title, displayall=displayall)
2892 formatone(fm, data, title=title, displayall=displayall)
2885
2893
2886 # sorts results by median time
2894 # sorts results by median time
2887 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2895 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2888 # list of (name, index) to display)
2896 # list of (name, index) to display)
2889 relevants = [
2897 relevants = [
2890 ("min", 0),
2898 ("min", 0),
2891 ("10%", resultcount * 10 // 100),
2899 ("10%", resultcount * 10 // 100),
2892 ("25%", resultcount * 25 // 100),
2900 ("25%", resultcount * 25 // 100),
2893 ("50%", resultcount * 70 // 100),
2901 ("50%", resultcount * 70 // 100),
2894 ("75%", resultcount * 75 // 100),
2902 ("75%", resultcount * 75 // 100),
2895 ("90%", resultcount * 90 // 100),
2903 ("90%", resultcount * 90 // 100),
2896 ("95%", resultcount * 95 // 100),
2904 ("95%", resultcount * 95 // 100),
2897 ("99%", resultcount * 99 // 100),
2905 ("99%", resultcount * 99 // 100),
2898 ("99.9%", resultcount * 999 // 1000),
2906 ("99.9%", resultcount * 999 // 1000),
2899 ("99.99%", resultcount * 9999 // 10000),
2907 ("99.99%", resultcount * 9999 // 10000),
2900 ("99.999%", resultcount * 99999 // 100000),
2908 ("99.999%", resultcount * 99999 // 100000),
2901 ("max", -1),
2909 ("max", -1),
2902 ]
2910 ]
2903 if not ui.quiet:
2911 if not ui.quiet:
2904 for name, idx in relevants:
2912 for name, idx in relevants:
2905 data = results[idx]
2913 data = results[idx]
2906 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2914 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2907 formatone(fm, data[1], title=title, displayall=displayall)
2915 formatone(fm, data[1], title=title, displayall=displayall)
2908
2916
2909 # XXX summing that many float will not be very precise, we ignore this fact
2917 # XXX summing that many float will not be very precise, we ignore this fact
2910 # for now
2918 # for now
2911 totaltime = []
2919 totaltime = []
2912 for item in allresults:
2920 for item in allresults:
2913 totaltime.append(
2921 totaltime.append(
2914 (
2922 (
2915 sum(x[1][0] for x in item),
2923 sum(x[1][0] for x in item),
2916 sum(x[1][1] for x in item),
2924 sum(x[1][1] for x in item),
2917 sum(x[1][2] for x in item),
2925 sum(x[1][2] for x in item),
2918 )
2926 )
2919 )
2927 )
2920 formatone(
2928 formatone(
2921 fm,
2929 fm,
2922 totaltime,
2930 totaltime,
2923 title="total time (%d revs)" % resultcount,
2931 title="total time (%d revs)" % resultcount,
2924 displayall=displayall,
2932 displayall=displayall,
2925 )
2933 )
2926 fm.end()
2934 fm.end()
2927
2935
2928
2936
2929 class _faketr(object):
2937 class _faketr(object):
2930 def add(s, x, y, z=None):
2938 def add(s, x, y, z=None):
2931 return None
2939 return None
2932
2940
2933
2941
2934 def _timeonewrite(
2942 def _timeonewrite(
2935 ui,
2943 ui,
2936 orig,
2944 orig,
2937 source,
2945 source,
2938 startrev,
2946 startrev,
2939 stoprev,
2947 stoprev,
2940 runidx=None,
2948 runidx=None,
2941 lazydeltabase=True,
2949 lazydeltabase=True,
2942 clearcaches=True,
2950 clearcaches=True,
2943 ):
2951 ):
2944 timings = []
2952 timings = []
2945 tr = _faketr()
2953 tr = _faketr()
2946 with _temprevlog(ui, orig, startrev) as dest:
2954 with _temprevlog(ui, orig, startrev) as dest:
2947 dest._lazydeltabase = lazydeltabase
2955 dest._lazydeltabase = lazydeltabase
2948 revs = list(orig.revs(startrev, stoprev))
2956 revs = list(orig.revs(startrev, stoprev))
2949 total = len(revs)
2957 total = len(revs)
2950 topic = 'adding'
2958 topic = 'adding'
2951 if runidx is not None:
2959 if runidx is not None:
2952 topic += ' (run #%d)' % runidx
2960 topic += ' (run #%d)' % runidx
2953 # Support both old and new progress API
2961 # Support both old and new progress API
2954 if util.safehasattr(ui, 'makeprogress'):
2962 if util.safehasattr(ui, 'makeprogress'):
2955 progress = ui.makeprogress(topic, unit='revs', total=total)
2963 progress = ui.makeprogress(topic, unit='revs', total=total)
2956
2964
2957 def updateprogress(pos):
2965 def updateprogress(pos):
2958 progress.update(pos)
2966 progress.update(pos)
2959
2967
2960 def completeprogress():
2968 def completeprogress():
2961 progress.complete()
2969 progress.complete()
2962
2970
2963 else:
2971 else:
2964
2972
2965 def updateprogress(pos):
2973 def updateprogress(pos):
2966 ui.progress(topic, pos, unit='revs', total=total)
2974 ui.progress(topic, pos, unit='revs', total=total)
2967
2975
2968 def completeprogress():
2976 def completeprogress():
2969 ui.progress(topic, None, unit='revs', total=total)
2977 ui.progress(topic, None, unit='revs', total=total)
2970
2978
2971 for idx, rev in enumerate(revs):
2979 for idx, rev in enumerate(revs):
2972 updateprogress(idx)
2980 updateprogress(idx)
2973 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2981 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2974 if clearcaches:
2982 if clearcaches:
2975 dest.index.clearcaches()
2983 dest.index.clearcaches()
2976 dest.clearcaches()
2984 dest.clearcaches()
2977 with timeone() as r:
2985 with timeone() as r:
2978 dest.addrawrevision(*addargs, **addkwargs)
2986 dest.addrawrevision(*addargs, **addkwargs)
2979 timings.append((rev, r[0]))
2987 timings.append((rev, r[0]))
2980 updateprogress(total)
2988 updateprogress(total)
2981 completeprogress()
2989 completeprogress()
2982 return timings
2990 return timings
2983
2991
2984
2992
2985 def _getrevisionseed(orig, rev, tr, source):
2993 def _getrevisionseed(orig, rev, tr, source):
2986 from mercurial.node import nullid
2994 from mercurial.node import nullid
2987
2995
2988 linkrev = orig.linkrev(rev)
2996 linkrev = orig.linkrev(rev)
2989 node = orig.node(rev)
2997 node = orig.node(rev)
2990 p1, p2 = orig.parents(node)
2998 p1, p2 = orig.parents(node)
2991 flags = orig.flags(rev)
2999 flags = orig.flags(rev)
2992 cachedelta = None
3000 cachedelta = None
2993 text = None
3001 text = None
2994
3002
2995 if source == b'full':
3003 if source == b'full':
2996 text = orig.revision(rev)
3004 text = orig.revision(rev)
2997 elif source == b'parent-1':
3005 elif source == b'parent-1':
2998 baserev = orig.rev(p1)
3006 baserev = orig.rev(p1)
2999 cachedelta = (baserev, orig.revdiff(p1, rev))
3007 cachedelta = (baserev, orig.revdiff(p1, rev))
3000 elif source == b'parent-2':
3008 elif source == b'parent-2':
3001 parent = p2
3009 parent = p2
3002 if p2 == nullid:
3010 if p2 == nullid:
3003 parent = p1
3011 parent = p1
3004 baserev = orig.rev(parent)
3012 baserev = orig.rev(parent)
3005 cachedelta = (baserev, orig.revdiff(parent, rev))
3013 cachedelta = (baserev, orig.revdiff(parent, rev))
3006 elif source == b'parent-smallest':
3014 elif source == b'parent-smallest':
3007 p1diff = orig.revdiff(p1, rev)
3015 p1diff = orig.revdiff(p1, rev)
3008 parent = p1
3016 parent = p1
3009 diff = p1diff
3017 diff = p1diff
3010 if p2 != nullid:
3018 if p2 != nullid:
3011 p2diff = orig.revdiff(p2, rev)
3019 p2diff = orig.revdiff(p2, rev)
3012 if len(p1diff) > len(p2diff):
3020 if len(p1diff) > len(p2diff):
3013 parent = p2
3021 parent = p2
3014 diff = p2diff
3022 diff = p2diff
3015 baserev = orig.rev(parent)
3023 baserev = orig.rev(parent)
3016 cachedelta = (baserev, diff)
3024 cachedelta = (baserev, diff)
3017 elif source == b'storage':
3025 elif source == b'storage':
3018 baserev = orig.deltaparent(rev)
3026 baserev = orig.deltaparent(rev)
3019 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3027 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3020
3028
3021 return (
3029 return (
3022 (text, tr, linkrev, p1, p2),
3030 (text, tr, linkrev, p1, p2),
3023 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3031 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3024 )
3032 )
3025
3033
3026
3034
3027 @contextlib.contextmanager
3035 @contextlib.contextmanager
3028 def _temprevlog(ui, orig, truncaterev):
3036 def _temprevlog(ui, orig, truncaterev):
3029 from mercurial import vfs as vfsmod
3037 from mercurial import vfs as vfsmod
3030
3038
3031 if orig._inline:
3039 if orig._inline:
3032 raise error.Abort('not supporting inline revlog (yet)')
3040 raise error.Abort('not supporting inline revlog (yet)')
3033 revlogkwargs = {}
3041 revlogkwargs = {}
3034 k = 'upperboundcomp'
3042 k = 'upperboundcomp'
3035 if util.safehasattr(orig, k):
3043 if util.safehasattr(orig, k):
3036 revlogkwargs[k] = getattr(orig, k)
3044 revlogkwargs[k] = getattr(orig, k)
3037
3045
3038 indexfile = getattr(orig, '_indexfile', None)
3046 indexfile = getattr(orig, '_indexfile', None)
3039 if indexfile is None:
3047 if indexfile is None:
3040 # compatibility with <= hg-5.8
3048 # compatibility with <= hg-5.8
3041 indexfile = getattr(orig, 'indexfile')
3049 indexfile = getattr(orig, 'indexfile')
3042 origindexpath = orig.opener.join(indexfile)
3050 origindexpath = orig.opener.join(indexfile)
3043
3051
3044 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3052 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3045 origdatapath = orig.opener.join(datafile)
3053 origdatapath = orig.opener.join(datafile)
3046 indexname = 'revlog.i'
3054 radix = b'revlog'
3047 dataname = 'revlog.d'
3055 indexname = b'revlog.i'
3056 dataname = b'revlog.d'
3048
3057
3049 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3058 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3050 try:
3059 try:
3051 # copy the data file in a temporary directory
3060 # copy the data file in a temporary directory
3052 ui.debug('copying data in %s\n' % tmpdir)
3061 ui.debug('copying data in %s\n' % tmpdir)
3053 destindexpath = os.path.join(tmpdir, 'revlog.i')
3062 destindexpath = os.path.join(tmpdir, 'revlog.i')
3054 destdatapath = os.path.join(tmpdir, 'revlog.d')
3063 destdatapath = os.path.join(tmpdir, 'revlog.d')
3055 shutil.copyfile(origindexpath, destindexpath)
3064 shutil.copyfile(origindexpath, destindexpath)
3056 shutil.copyfile(origdatapath, destdatapath)
3065 shutil.copyfile(origdatapath, destdatapath)
3057
3066
3058 # remove the data we want to add again
3067 # remove the data we want to add again
3059 ui.debug('truncating data to be rewritten\n')
3068 ui.debug('truncating data to be rewritten\n')
3060 with open(destindexpath, 'ab') as index:
3069 with open(destindexpath, 'ab') as index:
3061 index.seek(0)
3070 index.seek(0)
3062 index.truncate(truncaterev * orig._io.size)
3071 index.truncate(truncaterev * orig._io.size)
3063 with open(destdatapath, 'ab') as data:
3072 with open(destdatapath, 'ab') as data:
3064 data.seek(0)
3073 data.seek(0)
3065 data.truncate(orig.start(truncaterev))
3074 data.truncate(orig.start(truncaterev))
3066
3075
3067 # instantiate a new revlog from the temporary copy
3076 # instantiate a new revlog from the temporary copy
3068 ui.debug('truncating adding to be rewritten\n')
3077 ui.debug('truncating adding to be rewritten\n')
3069 vfs = vfsmod.vfs(tmpdir)
3078 vfs = vfsmod.vfs(tmpdir)
3070 vfs.options = getattr(orig.opener, 'options', None)
3079 vfs.options = getattr(orig.opener, 'options', None)
3071
3080
3081 try:
3082 dest = revlog(vfs, radix=radix, **revlogkwargs)
3083 except TypeError:
3072 dest = revlog(
3084 dest = revlog(
3073 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3085 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3074 )
3086 )
3075 if dest._inline:
3087 if dest._inline:
3076 raise error.Abort('not supporting inline revlog (yet)')
3088 raise error.Abort('not supporting inline revlog (yet)')
3077 # make sure internals are initialized
3089 # make sure internals are initialized
3078 dest.revision(len(dest) - 1)
3090 dest.revision(len(dest) - 1)
3079 yield dest
3091 yield dest
3080 del dest, vfs
3092 del dest, vfs
3081 finally:
3093 finally:
3082 shutil.rmtree(tmpdir, True)
3094 shutil.rmtree(tmpdir, True)
3083
3095
3084
3096
3085 @command(
3097 @command(
3086 b'perf::revlogchunks|perfrevlogchunks',
3098 b'perf::revlogchunks|perfrevlogchunks',
3087 revlogopts
3099 revlogopts
3088 + formatteropts
3100 + formatteropts
3089 + [
3101 + [
3090 (b'e', b'engines', b'', b'compression engines to use'),
3102 (b'e', b'engines', b'', b'compression engines to use'),
3091 (b's', b'startrev', 0, b'revision to start at'),
3103 (b's', b'startrev', 0, b'revision to start at'),
3092 ],
3104 ],
3093 b'-c|-m|FILE',
3105 b'-c|-m|FILE',
3094 )
3106 )
3095 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3107 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3096 """Benchmark operations on revlog chunks.
3108 """Benchmark operations on revlog chunks.
3097
3109
3098 Logically, each revlog is a collection of fulltext revisions. However,
3110 Logically, each revlog is a collection of fulltext revisions. However,
3099 stored within each revlog are "chunks" of possibly compressed data. This
3111 stored within each revlog are "chunks" of possibly compressed data. This
3100 data needs to be read and decompressed or compressed and written.
3112 data needs to be read and decompressed or compressed and written.
3101
3113
3102 This command measures the time it takes to read+decompress and recompress
3114 This command measures the time it takes to read+decompress and recompress
3103 chunks in a revlog. It effectively isolates I/O and compression performance.
3115 chunks in a revlog. It effectively isolates I/O and compression performance.
3104 For measurements of higher-level operations like resolving revisions,
3116 For measurements of higher-level operations like resolving revisions,
3105 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3117 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3106 """
3118 """
3107 opts = _byteskwargs(opts)
3119 opts = _byteskwargs(opts)
3108
3120
3109 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3121 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3110
3122
3111 # _chunkraw was renamed to _getsegmentforrevs.
3123 # _chunkraw was renamed to _getsegmentforrevs.
3112 try:
3124 try:
3113 segmentforrevs = rl._getsegmentforrevs
3125 segmentforrevs = rl._getsegmentforrevs
3114 except AttributeError:
3126 except AttributeError:
3115 segmentforrevs = rl._chunkraw
3127 segmentforrevs = rl._chunkraw
3116
3128
3117 # Verify engines argument.
3129 # Verify engines argument.
3118 if engines:
3130 if engines:
3119 engines = {e.strip() for e in engines.split(b',')}
3131 engines = {e.strip() for e in engines.split(b',')}
3120 for engine in engines:
3132 for engine in engines:
3121 try:
3133 try:
3122 util.compressionengines[engine]
3134 util.compressionengines[engine]
3123 except KeyError:
3135 except KeyError:
3124 raise error.Abort(b'unknown compression engine: %s' % engine)
3136 raise error.Abort(b'unknown compression engine: %s' % engine)
3125 else:
3137 else:
3126 engines = []
3138 engines = []
3127 for e in util.compengines:
3139 for e in util.compengines:
3128 engine = util.compengines[e]
3140 engine = util.compengines[e]
3129 try:
3141 try:
3130 if engine.available():
3142 if engine.available():
3131 engine.revlogcompressor().compress(b'dummy')
3143 engine.revlogcompressor().compress(b'dummy')
3132 engines.append(e)
3144 engines.append(e)
3133 except NotImplementedError:
3145 except NotImplementedError:
3134 pass
3146 pass
3135
3147
3136 revs = list(rl.revs(startrev, len(rl) - 1))
3148 revs = list(rl.revs(startrev, len(rl) - 1))
3137
3149
3138 def rlfh(rl):
3150 def rlfh(rl):
3139 if rl._inline:
3151 if rl._inline:
3140 indexfile = getattr(rl, '_indexfile', None)
3152 indexfile = getattr(rl, '_indexfile', None)
3141 if indexfile is None:
3153 if indexfile is None:
3142 # compatibility with <= hg-5.8
3154 # compatibility with <= hg-5.8
3143 indexfile = getattr(rl, 'indexfile')
3155 indexfile = getattr(rl, 'indexfile')
3144 return getsvfs(repo)(indexfile)
3156 return getsvfs(repo)(indexfile)
3145 else:
3157 else:
3146 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3158 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3147 return getsvfs(repo)(datafile)
3159 return getsvfs(repo)(datafile)
3148
3160
3149 def doread():
3161 def doread():
3150 rl.clearcaches()
3162 rl.clearcaches()
3151 for rev in revs:
3163 for rev in revs:
3152 segmentforrevs(rev, rev)
3164 segmentforrevs(rev, rev)
3153
3165
3154 def doreadcachedfh():
3166 def doreadcachedfh():
3155 rl.clearcaches()
3167 rl.clearcaches()
3156 fh = rlfh(rl)
3168 fh = rlfh(rl)
3157 for rev in revs:
3169 for rev in revs:
3158 segmentforrevs(rev, rev, df=fh)
3170 segmentforrevs(rev, rev, df=fh)
3159
3171
3160 def doreadbatch():
3172 def doreadbatch():
3161 rl.clearcaches()
3173 rl.clearcaches()
3162 segmentforrevs(revs[0], revs[-1])
3174 segmentforrevs(revs[0], revs[-1])
3163
3175
3164 def doreadbatchcachedfh():
3176 def doreadbatchcachedfh():
3165 rl.clearcaches()
3177 rl.clearcaches()
3166 fh = rlfh(rl)
3178 fh = rlfh(rl)
3167 segmentforrevs(revs[0], revs[-1], df=fh)
3179 segmentforrevs(revs[0], revs[-1], df=fh)
3168
3180
3169 def dochunk():
3181 def dochunk():
3170 rl.clearcaches()
3182 rl.clearcaches()
3171 fh = rlfh(rl)
3183 fh = rlfh(rl)
3172 for rev in revs:
3184 for rev in revs:
3173 rl._chunk(rev, df=fh)
3185 rl._chunk(rev, df=fh)
3174
3186
3175 chunks = [None]
3187 chunks = [None]
3176
3188
3177 def dochunkbatch():
3189 def dochunkbatch():
3178 rl.clearcaches()
3190 rl.clearcaches()
3179 fh = rlfh(rl)
3191 fh = rlfh(rl)
3180 # Save chunks as a side-effect.
3192 # Save chunks as a side-effect.
3181 chunks[0] = rl._chunks(revs, df=fh)
3193 chunks[0] = rl._chunks(revs, df=fh)
3182
3194
3183 def docompress(compressor):
3195 def docompress(compressor):
3184 rl.clearcaches()
3196 rl.clearcaches()
3185
3197
3186 try:
3198 try:
3187 # Swap in the requested compression engine.
3199 # Swap in the requested compression engine.
3188 oldcompressor = rl._compressor
3200 oldcompressor = rl._compressor
3189 rl._compressor = compressor
3201 rl._compressor = compressor
3190 for chunk in chunks[0]:
3202 for chunk in chunks[0]:
3191 rl.compress(chunk)
3203 rl.compress(chunk)
3192 finally:
3204 finally:
3193 rl._compressor = oldcompressor
3205 rl._compressor = oldcompressor
3194
3206
3195 benches = [
3207 benches = [
3196 (lambda: doread(), b'read'),
3208 (lambda: doread(), b'read'),
3197 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3209 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3198 (lambda: doreadbatch(), b'read batch'),
3210 (lambda: doreadbatch(), b'read batch'),
3199 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3211 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3200 (lambda: dochunk(), b'chunk'),
3212 (lambda: dochunk(), b'chunk'),
3201 (lambda: dochunkbatch(), b'chunk batch'),
3213 (lambda: dochunkbatch(), b'chunk batch'),
3202 ]
3214 ]
3203
3215
3204 for engine in sorted(engines):
3216 for engine in sorted(engines):
3205 compressor = util.compengines[engine].revlogcompressor()
3217 compressor = util.compengines[engine].revlogcompressor()
3206 benches.append(
3218 benches.append(
3207 (
3219 (
3208 functools.partial(docompress, compressor),
3220 functools.partial(docompress, compressor),
3209 b'compress w/ %s' % engine,
3221 b'compress w/ %s' % engine,
3210 )
3222 )
3211 )
3223 )
3212
3224
3213 for fn, title in benches:
3225 for fn, title in benches:
3214 timer, fm = gettimer(ui, opts)
3226 timer, fm = gettimer(ui, opts)
3215 timer(fn, title=title)
3227 timer(fn, title=title)
3216 fm.end()
3228 fm.end()
3217
3229
3218
3230
3219 @command(
3231 @command(
3220 b'perf::revlogrevision|perfrevlogrevision',
3232 b'perf::revlogrevision|perfrevlogrevision',
3221 revlogopts
3233 revlogopts
3222 + formatteropts
3234 + formatteropts
3223 + [(b'', b'cache', False, b'use caches instead of clearing')],
3235 + [(b'', b'cache', False, b'use caches instead of clearing')],
3224 b'-c|-m|FILE REV',
3236 b'-c|-m|FILE REV',
3225 )
3237 )
3226 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3238 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3227 """Benchmark obtaining a revlog revision.
3239 """Benchmark obtaining a revlog revision.
3228
3240
3229 Obtaining a revlog revision consists of roughly the following steps:
3241 Obtaining a revlog revision consists of roughly the following steps:
3230
3242
3231 1. Compute the delta chain
3243 1. Compute the delta chain
3232 2. Slice the delta chain if applicable
3244 2. Slice the delta chain if applicable
3233 3. Obtain the raw chunks for that delta chain
3245 3. Obtain the raw chunks for that delta chain
3234 4. Decompress each raw chunk
3246 4. Decompress each raw chunk
3235 5. Apply binary patches to obtain fulltext
3247 5. Apply binary patches to obtain fulltext
3236 6. Verify hash of fulltext
3248 6. Verify hash of fulltext
3237
3249
3238 This command measures the time spent in each of these phases.
3250 This command measures the time spent in each of these phases.
3239 """
3251 """
3240 opts = _byteskwargs(opts)
3252 opts = _byteskwargs(opts)
3241
3253
3242 if opts.get(b'changelog') or opts.get(b'manifest'):
3254 if opts.get(b'changelog') or opts.get(b'manifest'):
3243 file_, rev = None, file_
3255 file_, rev = None, file_
3244 elif rev is None:
3256 elif rev is None:
3245 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3257 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3246
3258
3247 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3259 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3248
3260
3249 # _chunkraw was renamed to _getsegmentforrevs.
3261 # _chunkraw was renamed to _getsegmentforrevs.
3250 try:
3262 try:
3251 segmentforrevs = r._getsegmentforrevs
3263 segmentforrevs = r._getsegmentforrevs
3252 except AttributeError:
3264 except AttributeError:
3253 segmentforrevs = r._chunkraw
3265 segmentforrevs = r._chunkraw
3254
3266
3255 node = r.lookup(rev)
3267 node = r.lookup(rev)
3256 rev = r.rev(node)
3268 rev = r.rev(node)
3257
3269
3258 def getrawchunks(data, chain):
3270 def getrawchunks(data, chain):
3259 start = r.start
3271 start = r.start
3260 length = r.length
3272 length = r.length
3261 inline = r._inline
3273 inline = r._inline
3262 try:
3274 try:
3263 iosize = r.index.entry_size
3275 iosize = r.index.entry_size
3264 except AttributeError:
3276 except AttributeError:
3265 iosize = r._io.size
3277 iosize = r._io.size
3266 buffer = util.buffer
3278 buffer = util.buffer
3267
3279
3268 chunks = []
3280 chunks = []
3269 ladd = chunks.append
3281 ladd = chunks.append
3270 for idx, item in enumerate(chain):
3282 for idx, item in enumerate(chain):
3271 offset = start(item[0])
3283 offset = start(item[0])
3272 bits = data[idx]
3284 bits = data[idx]
3273 for rev in item:
3285 for rev in item:
3274 chunkstart = start(rev)
3286 chunkstart = start(rev)
3275 if inline:
3287 if inline:
3276 chunkstart += (rev + 1) * iosize
3288 chunkstart += (rev + 1) * iosize
3277 chunklength = length(rev)
3289 chunklength = length(rev)
3278 ladd(buffer(bits, chunkstart - offset, chunklength))
3290 ladd(buffer(bits, chunkstart - offset, chunklength))
3279
3291
3280 return chunks
3292 return chunks
3281
3293
3282 def dodeltachain(rev):
3294 def dodeltachain(rev):
3283 if not cache:
3295 if not cache:
3284 r.clearcaches()
3296 r.clearcaches()
3285 r._deltachain(rev)
3297 r._deltachain(rev)
3286
3298
3287 def doread(chain):
3299 def doread(chain):
3288 if not cache:
3300 if not cache:
3289 r.clearcaches()
3301 r.clearcaches()
3290 for item in slicedchain:
3302 for item in slicedchain:
3291 segmentforrevs(item[0], item[-1])
3303 segmentforrevs(item[0], item[-1])
3292
3304
3293 def doslice(r, chain, size):
3305 def doslice(r, chain, size):
3294 for s in slicechunk(r, chain, targetsize=size):
3306 for s in slicechunk(r, chain, targetsize=size):
3295 pass
3307 pass
3296
3308
3297 def dorawchunks(data, chain):
3309 def dorawchunks(data, chain):
3298 if not cache:
3310 if not cache:
3299 r.clearcaches()
3311 r.clearcaches()
3300 getrawchunks(data, chain)
3312 getrawchunks(data, chain)
3301
3313
3302 def dodecompress(chunks):
3314 def dodecompress(chunks):
3303 decomp = r.decompress
3315 decomp = r.decompress
3304 for chunk in chunks:
3316 for chunk in chunks:
3305 decomp(chunk)
3317 decomp(chunk)
3306
3318
3307 def dopatch(text, bins):
3319 def dopatch(text, bins):
3308 if not cache:
3320 if not cache:
3309 r.clearcaches()
3321 r.clearcaches()
3310 mdiff.patches(text, bins)
3322 mdiff.patches(text, bins)
3311
3323
3312 def dohash(text):
3324 def dohash(text):
3313 if not cache:
3325 if not cache:
3314 r.clearcaches()
3326 r.clearcaches()
3315 r.checkhash(text, node, rev=rev)
3327 r.checkhash(text, node, rev=rev)
3316
3328
3317 def dorevision():
3329 def dorevision():
3318 if not cache:
3330 if not cache:
3319 r.clearcaches()
3331 r.clearcaches()
3320 r.revision(node)
3332 r.revision(node)
3321
3333
3322 try:
3334 try:
3323 from mercurial.revlogutils.deltas import slicechunk
3335 from mercurial.revlogutils.deltas import slicechunk
3324 except ImportError:
3336 except ImportError:
3325 slicechunk = getattr(revlog, '_slicechunk', None)
3337 slicechunk = getattr(revlog, '_slicechunk', None)
3326
3338
3327 size = r.length(rev)
3339 size = r.length(rev)
3328 chain = r._deltachain(rev)[0]
3340 chain = r._deltachain(rev)[0]
3329 if not getattr(r, '_withsparseread', False):
3341 if not getattr(r, '_withsparseread', False):
3330 slicedchain = (chain,)
3342 slicedchain = (chain,)
3331 else:
3343 else:
3332 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3344 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3333 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3345 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3334 rawchunks = getrawchunks(data, slicedchain)
3346 rawchunks = getrawchunks(data, slicedchain)
3335 bins = r._chunks(chain)
3347 bins = r._chunks(chain)
3336 text = bytes(bins[0])
3348 text = bytes(bins[0])
3337 bins = bins[1:]
3349 bins = bins[1:]
3338 text = mdiff.patches(text, bins)
3350 text = mdiff.patches(text, bins)
3339
3351
3340 benches = [
3352 benches = [
3341 (lambda: dorevision(), b'full'),
3353 (lambda: dorevision(), b'full'),
3342 (lambda: dodeltachain(rev), b'deltachain'),
3354 (lambda: dodeltachain(rev), b'deltachain'),
3343 (lambda: doread(chain), b'read'),
3355 (lambda: doread(chain), b'read'),
3344 ]
3356 ]
3345
3357
3346 if getattr(r, '_withsparseread', False):
3358 if getattr(r, '_withsparseread', False):
3347 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3359 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3348 benches.append(slicing)
3360 benches.append(slicing)
3349
3361
3350 benches.extend(
3362 benches.extend(
3351 [
3363 [
3352 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3364 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3353 (lambda: dodecompress(rawchunks), b'decompress'),
3365 (lambda: dodecompress(rawchunks), b'decompress'),
3354 (lambda: dopatch(text, bins), b'patch'),
3366 (lambda: dopatch(text, bins), b'patch'),
3355 (lambda: dohash(text), b'hash'),
3367 (lambda: dohash(text), b'hash'),
3356 ]
3368 ]
3357 )
3369 )
3358
3370
3359 timer, fm = gettimer(ui, opts)
3371 timer, fm = gettimer(ui, opts)
3360 for fn, title in benches:
3372 for fn, title in benches:
3361 timer(fn, title=title)
3373 timer(fn, title=title)
3362 fm.end()
3374 fm.end()
3363
3375
3364
3376
3365 @command(
3377 @command(
3366 b'perf::revset|perfrevset',
3378 b'perf::revset|perfrevset',
3367 [
3379 [
3368 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3380 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3369 (b'', b'contexts', False, b'obtain changectx for each revision'),
3381 (b'', b'contexts', False, b'obtain changectx for each revision'),
3370 ]
3382 ]
3371 + formatteropts,
3383 + formatteropts,
3372 b"REVSET",
3384 b"REVSET",
3373 )
3385 )
3374 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3386 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3375 """benchmark the execution time of a revset
3387 """benchmark the execution time of a revset
3376
3388
3377 Use the --clean option if need to evaluate the impact of build volatile
3389 Use the --clean option if need to evaluate the impact of build volatile
3378 revisions set cache on the revset execution. Volatile cache hold filtered
3390 revisions set cache on the revset execution. Volatile cache hold filtered
3379 and obsolete related cache."""
3391 and obsolete related cache."""
3380 opts = _byteskwargs(opts)
3392 opts = _byteskwargs(opts)
3381
3393
3382 timer, fm = gettimer(ui, opts)
3394 timer, fm = gettimer(ui, opts)
3383
3395
3384 def d():
3396 def d():
3385 if clear:
3397 if clear:
3386 repo.invalidatevolatilesets()
3398 repo.invalidatevolatilesets()
3387 if contexts:
3399 if contexts:
3388 for ctx in repo.set(expr):
3400 for ctx in repo.set(expr):
3389 pass
3401 pass
3390 else:
3402 else:
3391 for r in repo.revs(expr):
3403 for r in repo.revs(expr):
3392 pass
3404 pass
3393
3405
3394 timer(d)
3406 timer(d)
3395 fm.end()
3407 fm.end()
3396
3408
3397
3409
3398 @command(
3410 @command(
3399 b'perf::volatilesets|perfvolatilesets',
3411 b'perf::volatilesets|perfvolatilesets',
3400 [
3412 [
3401 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3413 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3402 ]
3414 ]
3403 + formatteropts,
3415 + formatteropts,
3404 )
3416 )
3405 def perfvolatilesets(ui, repo, *names, **opts):
3417 def perfvolatilesets(ui, repo, *names, **opts):
3406 """benchmark the computation of various volatile set
3418 """benchmark the computation of various volatile set
3407
3419
3408 Volatile set computes element related to filtering and obsolescence."""
3420 Volatile set computes element related to filtering and obsolescence."""
3409 opts = _byteskwargs(opts)
3421 opts = _byteskwargs(opts)
3410 timer, fm = gettimer(ui, opts)
3422 timer, fm = gettimer(ui, opts)
3411 repo = repo.unfiltered()
3423 repo = repo.unfiltered()
3412
3424
3413 def getobs(name):
3425 def getobs(name):
3414 def d():
3426 def d():
3415 repo.invalidatevolatilesets()
3427 repo.invalidatevolatilesets()
3416 if opts[b'clear_obsstore']:
3428 if opts[b'clear_obsstore']:
3417 clearfilecache(repo, b'obsstore')
3429 clearfilecache(repo, b'obsstore')
3418 obsolete.getrevs(repo, name)
3430 obsolete.getrevs(repo, name)
3419
3431
3420 return d
3432 return d
3421
3433
3422 allobs = sorted(obsolete.cachefuncs)
3434 allobs = sorted(obsolete.cachefuncs)
3423 if names:
3435 if names:
3424 allobs = [n for n in allobs if n in names]
3436 allobs = [n for n in allobs if n in names]
3425
3437
3426 for name in allobs:
3438 for name in allobs:
3427 timer(getobs(name), title=name)
3439 timer(getobs(name), title=name)
3428
3440
3429 def getfiltered(name):
3441 def getfiltered(name):
3430 def d():
3442 def d():
3431 repo.invalidatevolatilesets()
3443 repo.invalidatevolatilesets()
3432 if opts[b'clear_obsstore']:
3444 if opts[b'clear_obsstore']:
3433 clearfilecache(repo, b'obsstore')
3445 clearfilecache(repo, b'obsstore')
3434 repoview.filterrevs(repo, name)
3446 repoview.filterrevs(repo, name)
3435
3447
3436 return d
3448 return d
3437
3449
3438 allfilter = sorted(repoview.filtertable)
3450 allfilter = sorted(repoview.filtertable)
3439 if names:
3451 if names:
3440 allfilter = [n for n in allfilter if n in names]
3452 allfilter = [n for n in allfilter if n in names]
3441
3453
3442 for name in allfilter:
3454 for name in allfilter:
3443 timer(getfiltered(name), title=name)
3455 timer(getfiltered(name), title=name)
3444 fm.end()
3456 fm.end()
3445
3457
3446
3458
3447 @command(
3459 @command(
3448 b'perf::branchmap|perfbranchmap',
3460 b'perf::branchmap|perfbranchmap',
3449 [
3461 [
3450 (b'f', b'full', False, b'Includes build time of subset'),
3462 (b'f', b'full', False, b'Includes build time of subset'),
3451 (
3463 (
3452 b'',
3464 b'',
3453 b'clear-revbranch',
3465 b'clear-revbranch',
3454 False,
3466 False,
3455 b'purge the revbranch cache between computation',
3467 b'purge the revbranch cache between computation',
3456 ),
3468 ),
3457 ]
3469 ]
3458 + formatteropts,
3470 + formatteropts,
3459 )
3471 )
3460 def perfbranchmap(ui, repo, *filternames, **opts):
3472 def perfbranchmap(ui, repo, *filternames, **opts):
3461 """benchmark the update of a branchmap
3473 """benchmark the update of a branchmap
3462
3474
3463 This benchmarks the full repo.branchmap() call with read and write disabled
3475 This benchmarks the full repo.branchmap() call with read and write disabled
3464 """
3476 """
3465 opts = _byteskwargs(opts)
3477 opts = _byteskwargs(opts)
3466 full = opts.get(b"full", False)
3478 full = opts.get(b"full", False)
3467 clear_revbranch = opts.get(b"clear_revbranch", False)
3479 clear_revbranch = opts.get(b"clear_revbranch", False)
3468 timer, fm = gettimer(ui, opts)
3480 timer, fm = gettimer(ui, opts)
3469
3481
3470 def getbranchmap(filtername):
3482 def getbranchmap(filtername):
3471 """generate a benchmark function for the filtername"""
3483 """generate a benchmark function for the filtername"""
3472 if filtername is None:
3484 if filtername is None:
3473 view = repo
3485 view = repo
3474 else:
3486 else:
3475 view = repo.filtered(filtername)
3487 view = repo.filtered(filtername)
3476 if util.safehasattr(view._branchcaches, '_per_filter'):
3488 if util.safehasattr(view._branchcaches, '_per_filter'):
3477 filtered = view._branchcaches._per_filter
3489 filtered = view._branchcaches._per_filter
3478 else:
3490 else:
3479 # older versions
3491 # older versions
3480 filtered = view._branchcaches
3492 filtered = view._branchcaches
3481
3493
3482 def d():
3494 def d():
3483 if clear_revbranch:
3495 if clear_revbranch:
3484 repo.revbranchcache()._clear()
3496 repo.revbranchcache()._clear()
3485 if full:
3497 if full:
3486 view._branchcaches.clear()
3498 view._branchcaches.clear()
3487 else:
3499 else:
3488 filtered.pop(filtername, None)
3500 filtered.pop(filtername, None)
3489 view.branchmap()
3501 view.branchmap()
3490
3502
3491 return d
3503 return d
3492
3504
3493 # add filter in smaller subset to bigger subset
3505 # add filter in smaller subset to bigger subset
3494 possiblefilters = set(repoview.filtertable)
3506 possiblefilters = set(repoview.filtertable)
3495 if filternames:
3507 if filternames:
3496 possiblefilters &= set(filternames)
3508 possiblefilters &= set(filternames)
3497 subsettable = getbranchmapsubsettable()
3509 subsettable = getbranchmapsubsettable()
3498 allfilters = []
3510 allfilters = []
3499 while possiblefilters:
3511 while possiblefilters:
3500 for name in possiblefilters:
3512 for name in possiblefilters:
3501 subset = subsettable.get(name)
3513 subset = subsettable.get(name)
3502 if subset not in possiblefilters:
3514 if subset not in possiblefilters:
3503 break
3515 break
3504 else:
3516 else:
3505 assert False, b'subset cycle %s!' % possiblefilters
3517 assert False, b'subset cycle %s!' % possiblefilters
3506 allfilters.append(name)
3518 allfilters.append(name)
3507 possiblefilters.remove(name)
3519 possiblefilters.remove(name)
3508
3520
3509 # warm the cache
3521 # warm the cache
3510 if not full:
3522 if not full:
3511 for name in allfilters:
3523 for name in allfilters:
3512 repo.filtered(name).branchmap()
3524 repo.filtered(name).branchmap()
3513 if not filternames or b'unfiltered' in filternames:
3525 if not filternames or b'unfiltered' in filternames:
3514 # add unfiltered
3526 # add unfiltered
3515 allfilters.append(None)
3527 allfilters.append(None)
3516
3528
3517 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3529 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3518 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3530 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3519 branchcacheread.set(classmethod(lambda *args: None))
3531 branchcacheread.set(classmethod(lambda *args: None))
3520 else:
3532 else:
3521 # older versions
3533 # older versions
3522 branchcacheread = safeattrsetter(branchmap, b'read')
3534 branchcacheread = safeattrsetter(branchmap, b'read')
3523 branchcacheread.set(lambda *args: None)
3535 branchcacheread.set(lambda *args: None)
3524 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3536 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3525 branchcachewrite.set(lambda *args: None)
3537 branchcachewrite.set(lambda *args: None)
3526 try:
3538 try:
3527 for name in allfilters:
3539 for name in allfilters:
3528 printname = name
3540 printname = name
3529 if name is None:
3541 if name is None:
3530 printname = b'unfiltered'
3542 printname = b'unfiltered'
3531 timer(getbranchmap(name), title=printname)
3543 timer(getbranchmap(name), title=printname)
3532 finally:
3544 finally:
3533 branchcacheread.restore()
3545 branchcacheread.restore()
3534 branchcachewrite.restore()
3546 branchcachewrite.restore()
3535 fm.end()
3547 fm.end()
3536
3548
3537
3549
3538 @command(
3550 @command(
3539 b'perf::branchmapupdate|perfbranchmapupdate',
3551 b'perf::branchmapupdate|perfbranchmapupdate',
3540 [
3552 [
3541 (b'', b'base', [], b'subset of revision to start from'),
3553 (b'', b'base', [], b'subset of revision to start from'),
3542 (b'', b'target', [], b'subset of revision to end with'),
3554 (b'', b'target', [], b'subset of revision to end with'),
3543 (b'', b'clear-caches', False, b'clear cache between each runs'),
3555 (b'', b'clear-caches', False, b'clear cache between each runs'),
3544 ]
3556 ]
3545 + formatteropts,
3557 + formatteropts,
3546 )
3558 )
3547 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3559 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3548 """benchmark branchmap update from for <base> revs to <target> revs
3560 """benchmark branchmap update from for <base> revs to <target> revs
3549
3561
3550 If `--clear-caches` is passed, the following items will be reset before
3562 If `--clear-caches` is passed, the following items will be reset before
3551 each update:
3563 each update:
3552 * the changelog instance and associated indexes
3564 * the changelog instance and associated indexes
3553 * the rev-branch-cache instance
3565 * the rev-branch-cache instance
3554
3566
3555 Examples:
3567 Examples:
3556
3568
3557 # update for the one last revision
3569 # update for the one last revision
3558 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3570 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3559
3571
3560 $ update for change coming with a new branch
3572 $ update for change coming with a new branch
3561 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3573 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3562 """
3574 """
3563 from mercurial import branchmap
3575 from mercurial import branchmap
3564 from mercurial import repoview
3576 from mercurial import repoview
3565
3577
3566 opts = _byteskwargs(opts)
3578 opts = _byteskwargs(opts)
3567 timer, fm = gettimer(ui, opts)
3579 timer, fm = gettimer(ui, opts)
3568 clearcaches = opts[b'clear_caches']
3580 clearcaches = opts[b'clear_caches']
3569 unfi = repo.unfiltered()
3581 unfi = repo.unfiltered()
3570 x = [None] # used to pass data between closure
3582 x = [None] # used to pass data between closure
3571
3583
3572 # we use a `list` here to avoid possible side effect from smartset
3584 # we use a `list` here to avoid possible side effect from smartset
3573 baserevs = list(scmutil.revrange(repo, base))
3585 baserevs = list(scmutil.revrange(repo, base))
3574 targetrevs = list(scmutil.revrange(repo, target))
3586 targetrevs = list(scmutil.revrange(repo, target))
3575 if not baserevs:
3587 if not baserevs:
3576 raise error.Abort(b'no revisions selected for --base')
3588 raise error.Abort(b'no revisions selected for --base')
3577 if not targetrevs:
3589 if not targetrevs:
3578 raise error.Abort(b'no revisions selected for --target')
3590 raise error.Abort(b'no revisions selected for --target')
3579
3591
3580 # make sure the target branchmap also contains the one in the base
3592 # make sure the target branchmap also contains the one in the base
3581 targetrevs = list(set(baserevs) | set(targetrevs))
3593 targetrevs = list(set(baserevs) | set(targetrevs))
3582 targetrevs.sort()
3594 targetrevs.sort()
3583
3595
3584 cl = repo.changelog
3596 cl = repo.changelog
3585 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3597 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3586 allbaserevs.sort()
3598 allbaserevs.sort()
3587 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3599 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3588
3600
3589 newrevs = list(alltargetrevs.difference(allbaserevs))
3601 newrevs = list(alltargetrevs.difference(allbaserevs))
3590 newrevs.sort()
3602 newrevs.sort()
3591
3603
3592 allrevs = frozenset(unfi.changelog.revs())
3604 allrevs = frozenset(unfi.changelog.revs())
3593 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3605 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3594 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3606 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3595
3607
3596 def basefilter(repo, visibilityexceptions=None):
3608 def basefilter(repo, visibilityexceptions=None):
3597 return basefilterrevs
3609 return basefilterrevs
3598
3610
3599 def targetfilter(repo, visibilityexceptions=None):
3611 def targetfilter(repo, visibilityexceptions=None):
3600 return targetfilterrevs
3612 return targetfilterrevs
3601
3613
3602 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3614 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3603 ui.status(msg % (len(allbaserevs), len(newrevs)))
3615 ui.status(msg % (len(allbaserevs), len(newrevs)))
3604 if targetfilterrevs:
3616 if targetfilterrevs:
3605 msg = b'(%d revisions still filtered)\n'
3617 msg = b'(%d revisions still filtered)\n'
3606 ui.status(msg % len(targetfilterrevs))
3618 ui.status(msg % len(targetfilterrevs))
3607
3619
3608 try:
3620 try:
3609 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3621 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3610 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3622 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3611
3623
3612 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3624 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3613 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3625 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3614
3626
3615 # try to find an existing branchmap to reuse
3627 # try to find an existing branchmap to reuse
3616 subsettable = getbranchmapsubsettable()
3628 subsettable = getbranchmapsubsettable()
3617 candidatefilter = subsettable.get(None)
3629 candidatefilter = subsettable.get(None)
3618 while candidatefilter is not None:
3630 while candidatefilter is not None:
3619 candidatebm = repo.filtered(candidatefilter).branchmap()
3631 candidatebm = repo.filtered(candidatefilter).branchmap()
3620 if candidatebm.validfor(baserepo):
3632 if candidatebm.validfor(baserepo):
3621 filtered = repoview.filterrevs(repo, candidatefilter)
3633 filtered = repoview.filterrevs(repo, candidatefilter)
3622 missing = [r for r in allbaserevs if r in filtered]
3634 missing = [r for r in allbaserevs if r in filtered]
3623 base = candidatebm.copy()
3635 base = candidatebm.copy()
3624 base.update(baserepo, missing)
3636 base.update(baserepo, missing)
3625 break
3637 break
3626 candidatefilter = subsettable.get(candidatefilter)
3638 candidatefilter = subsettable.get(candidatefilter)
3627 else:
3639 else:
3628 # no suitable subset where found
3640 # no suitable subset where found
3629 base = branchmap.branchcache()
3641 base = branchmap.branchcache()
3630 base.update(baserepo, allbaserevs)
3642 base.update(baserepo, allbaserevs)
3631
3643
3632 def setup():
3644 def setup():
3633 x[0] = base.copy()
3645 x[0] = base.copy()
3634 if clearcaches:
3646 if clearcaches:
3635 unfi._revbranchcache = None
3647 unfi._revbranchcache = None
3636 clearchangelog(repo)
3648 clearchangelog(repo)
3637
3649
3638 def bench():
3650 def bench():
3639 x[0].update(targetrepo, newrevs)
3651 x[0].update(targetrepo, newrevs)
3640
3652
3641 timer(bench, setup=setup)
3653 timer(bench, setup=setup)
3642 fm.end()
3654 fm.end()
3643 finally:
3655 finally:
3644 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3656 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3645 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3657 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3646
3658
3647
3659
3648 @command(
3660 @command(
3649 b'perf::branchmapload|perfbranchmapload',
3661 b'perf::branchmapload|perfbranchmapload',
3650 [
3662 [
3651 (b'f', b'filter', b'', b'Specify repoview filter'),
3663 (b'f', b'filter', b'', b'Specify repoview filter'),
3652 (b'', b'list', False, b'List brachmap filter caches'),
3664 (b'', b'list', False, b'List brachmap filter caches'),
3653 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3665 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3654 ]
3666 ]
3655 + formatteropts,
3667 + formatteropts,
3656 )
3668 )
3657 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3669 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3658 """benchmark reading the branchmap"""
3670 """benchmark reading the branchmap"""
3659 opts = _byteskwargs(opts)
3671 opts = _byteskwargs(opts)
3660 clearrevlogs = opts[b'clear_revlogs']
3672 clearrevlogs = opts[b'clear_revlogs']
3661
3673
3662 if list:
3674 if list:
3663 for name, kind, st in repo.cachevfs.readdir(stat=True):
3675 for name, kind, st in repo.cachevfs.readdir(stat=True):
3664 if name.startswith(b'branch2'):
3676 if name.startswith(b'branch2'):
3665 filtername = name.partition(b'-')[2] or b'unfiltered'
3677 filtername = name.partition(b'-')[2] or b'unfiltered'
3666 ui.status(
3678 ui.status(
3667 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3679 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3668 )
3680 )
3669 return
3681 return
3670 if not filter:
3682 if not filter:
3671 filter = None
3683 filter = None
3672 subsettable = getbranchmapsubsettable()
3684 subsettable = getbranchmapsubsettable()
3673 if filter is None:
3685 if filter is None:
3674 repo = repo.unfiltered()
3686 repo = repo.unfiltered()
3675 else:
3687 else:
3676 repo = repoview.repoview(repo, filter)
3688 repo = repoview.repoview(repo, filter)
3677
3689
3678 repo.branchmap() # make sure we have a relevant, up to date branchmap
3690 repo.branchmap() # make sure we have a relevant, up to date branchmap
3679
3691
3680 try:
3692 try:
3681 fromfile = branchmap.branchcache.fromfile
3693 fromfile = branchmap.branchcache.fromfile
3682 except AttributeError:
3694 except AttributeError:
3683 # older versions
3695 # older versions
3684 fromfile = branchmap.read
3696 fromfile = branchmap.read
3685
3697
3686 currentfilter = filter
3698 currentfilter = filter
3687 # try once without timer, the filter may not be cached
3699 # try once without timer, the filter may not be cached
3688 while fromfile(repo) is None:
3700 while fromfile(repo) is None:
3689 currentfilter = subsettable.get(currentfilter)
3701 currentfilter = subsettable.get(currentfilter)
3690 if currentfilter is None:
3702 if currentfilter is None:
3691 raise error.Abort(
3703 raise error.Abort(
3692 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3704 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3693 )
3705 )
3694 repo = repo.filtered(currentfilter)
3706 repo = repo.filtered(currentfilter)
3695 timer, fm = gettimer(ui, opts)
3707 timer, fm = gettimer(ui, opts)
3696
3708
3697 def setup():
3709 def setup():
3698 if clearrevlogs:
3710 if clearrevlogs:
3699 clearchangelog(repo)
3711 clearchangelog(repo)
3700
3712
3701 def bench():
3713 def bench():
3702 fromfile(repo)
3714 fromfile(repo)
3703
3715
3704 timer(bench, setup=setup)
3716 timer(bench, setup=setup)
3705 fm.end()
3717 fm.end()
3706
3718
3707
3719
3708 @command(b'perf::loadmarkers|perfloadmarkers')
3720 @command(b'perf::loadmarkers|perfloadmarkers')
3709 def perfloadmarkers(ui, repo):
3721 def perfloadmarkers(ui, repo):
3710 """benchmark the time to parse the on-disk markers for a repo
3722 """benchmark the time to parse the on-disk markers for a repo
3711
3723
3712 Result is the number of markers in the repo."""
3724 Result is the number of markers in the repo."""
3713 timer, fm = gettimer(ui)
3725 timer, fm = gettimer(ui)
3714 svfs = getsvfs(repo)
3726 svfs = getsvfs(repo)
3715 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3727 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3716 fm.end()
3728 fm.end()
3717
3729
3718
3730
3719 @command(
3731 @command(
3720 b'perf::lrucachedict|perflrucachedict',
3732 b'perf::lrucachedict|perflrucachedict',
3721 formatteropts
3733 formatteropts
3722 + [
3734 + [
3723 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3735 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3724 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3736 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3725 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3737 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3726 (b'', b'size', 4, b'size of cache'),
3738 (b'', b'size', 4, b'size of cache'),
3727 (b'', b'gets', 10000, b'number of key lookups'),
3739 (b'', b'gets', 10000, b'number of key lookups'),
3728 (b'', b'sets', 10000, b'number of key sets'),
3740 (b'', b'sets', 10000, b'number of key sets'),
3729 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3741 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3730 (
3742 (
3731 b'',
3743 b'',
3732 b'mixedgetfreq',
3744 b'mixedgetfreq',
3733 50,
3745 50,
3734 b'frequency of get vs set ops in mixed mode',
3746 b'frequency of get vs set ops in mixed mode',
3735 ),
3747 ),
3736 ],
3748 ],
3737 norepo=True,
3749 norepo=True,
3738 )
3750 )
3739 def perflrucache(
3751 def perflrucache(
3740 ui,
3752 ui,
3741 mincost=0,
3753 mincost=0,
3742 maxcost=100,
3754 maxcost=100,
3743 costlimit=0,
3755 costlimit=0,
3744 size=4,
3756 size=4,
3745 gets=10000,
3757 gets=10000,
3746 sets=10000,
3758 sets=10000,
3747 mixed=10000,
3759 mixed=10000,
3748 mixedgetfreq=50,
3760 mixedgetfreq=50,
3749 **opts
3761 **opts
3750 ):
3762 ):
3751 opts = _byteskwargs(opts)
3763 opts = _byteskwargs(opts)
3752
3764
3753 def doinit():
3765 def doinit():
3754 for i in _xrange(10000):
3766 for i in _xrange(10000):
3755 util.lrucachedict(size)
3767 util.lrucachedict(size)
3756
3768
3757 costrange = list(range(mincost, maxcost + 1))
3769 costrange = list(range(mincost, maxcost + 1))
3758
3770
3759 values = []
3771 values = []
3760 for i in _xrange(size):
3772 for i in _xrange(size):
3761 values.append(random.randint(0, _maxint))
3773 values.append(random.randint(0, _maxint))
3762
3774
3763 # Get mode fills the cache and tests raw lookup performance with no
3775 # Get mode fills the cache and tests raw lookup performance with no
3764 # eviction.
3776 # eviction.
3765 getseq = []
3777 getseq = []
3766 for i in _xrange(gets):
3778 for i in _xrange(gets):
3767 getseq.append(random.choice(values))
3779 getseq.append(random.choice(values))
3768
3780
3769 def dogets():
3781 def dogets():
3770 d = util.lrucachedict(size)
3782 d = util.lrucachedict(size)
3771 for v in values:
3783 for v in values:
3772 d[v] = v
3784 d[v] = v
3773 for key in getseq:
3785 for key in getseq:
3774 value = d[key]
3786 value = d[key]
3775 value # silence pyflakes warning
3787 value # silence pyflakes warning
3776
3788
3777 def dogetscost():
3789 def dogetscost():
3778 d = util.lrucachedict(size, maxcost=costlimit)
3790 d = util.lrucachedict(size, maxcost=costlimit)
3779 for i, v in enumerate(values):
3791 for i, v in enumerate(values):
3780 d.insert(v, v, cost=costs[i])
3792 d.insert(v, v, cost=costs[i])
3781 for key in getseq:
3793 for key in getseq:
3782 try:
3794 try:
3783 value = d[key]
3795 value = d[key]
3784 value # silence pyflakes warning
3796 value # silence pyflakes warning
3785 except KeyError:
3797 except KeyError:
3786 pass
3798 pass
3787
3799
3788 # Set mode tests insertion speed with cache eviction.
3800 # Set mode tests insertion speed with cache eviction.
3789 setseq = []
3801 setseq = []
3790 costs = []
3802 costs = []
3791 for i in _xrange(sets):
3803 for i in _xrange(sets):
3792 setseq.append(random.randint(0, _maxint))
3804 setseq.append(random.randint(0, _maxint))
3793 costs.append(random.choice(costrange))
3805 costs.append(random.choice(costrange))
3794
3806
3795 def doinserts():
3807 def doinserts():
3796 d = util.lrucachedict(size)
3808 d = util.lrucachedict(size)
3797 for v in setseq:
3809 for v in setseq:
3798 d.insert(v, v)
3810 d.insert(v, v)
3799
3811
3800 def doinsertscost():
3812 def doinsertscost():
3801 d = util.lrucachedict(size, maxcost=costlimit)
3813 d = util.lrucachedict(size, maxcost=costlimit)
3802 for i, v in enumerate(setseq):
3814 for i, v in enumerate(setseq):
3803 d.insert(v, v, cost=costs[i])
3815 d.insert(v, v, cost=costs[i])
3804
3816
3805 def dosets():
3817 def dosets():
3806 d = util.lrucachedict(size)
3818 d = util.lrucachedict(size)
3807 for v in setseq:
3819 for v in setseq:
3808 d[v] = v
3820 d[v] = v
3809
3821
3810 # Mixed mode randomly performs gets and sets with eviction.
3822 # Mixed mode randomly performs gets and sets with eviction.
3811 mixedops = []
3823 mixedops = []
3812 for i in _xrange(mixed):
3824 for i in _xrange(mixed):
3813 r = random.randint(0, 100)
3825 r = random.randint(0, 100)
3814 if r < mixedgetfreq:
3826 if r < mixedgetfreq:
3815 op = 0
3827 op = 0
3816 else:
3828 else:
3817 op = 1
3829 op = 1
3818
3830
3819 mixedops.append(
3831 mixedops.append(
3820 (op, random.randint(0, size * 2), random.choice(costrange))
3832 (op, random.randint(0, size * 2), random.choice(costrange))
3821 )
3833 )
3822
3834
3823 def domixed():
3835 def domixed():
3824 d = util.lrucachedict(size)
3836 d = util.lrucachedict(size)
3825
3837
3826 for op, v, cost in mixedops:
3838 for op, v, cost in mixedops:
3827 if op == 0:
3839 if op == 0:
3828 try:
3840 try:
3829 d[v]
3841 d[v]
3830 except KeyError:
3842 except KeyError:
3831 pass
3843 pass
3832 else:
3844 else:
3833 d[v] = v
3845 d[v] = v
3834
3846
3835 def domixedcost():
3847 def domixedcost():
3836 d = util.lrucachedict(size, maxcost=costlimit)
3848 d = util.lrucachedict(size, maxcost=costlimit)
3837
3849
3838 for op, v, cost in mixedops:
3850 for op, v, cost in mixedops:
3839 if op == 0:
3851 if op == 0:
3840 try:
3852 try:
3841 d[v]
3853 d[v]
3842 except KeyError:
3854 except KeyError:
3843 pass
3855 pass
3844 else:
3856 else:
3845 d.insert(v, v, cost=cost)
3857 d.insert(v, v, cost=cost)
3846
3858
3847 benches = [
3859 benches = [
3848 (doinit, b'init'),
3860 (doinit, b'init'),
3849 ]
3861 ]
3850
3862
3851 if costlimit:
3863 if costlimit:
3852 benches.extend(
3864 benches.extend(
3853 [
3865 [
3854 (dogetscost, b'gets w/ cost limit'),
3866 (dogetscost, b'gets w/ cost limit'),
3855 (doinsertscost, b'inserts w/ cost limit'),
3867 (doinsertscost, b'inserts w/ cost limit'),
3856 (domixedcost, b'mixed w/ cost limit'),
3868 (domixedcost, b'mixed w/ cost limit'),
3857 ]
3869 ]
3858 )
3870 )
3859 else:
3871 else:
3860 benches.extend(
3872 benches.extend(
3861 [
3873 [
3862 (dogets, b'gets'),
3874 (dogets, b'gets'),
3863 (doinserts, b'inserts'),
3875 (doinserts, b'inserts'),
3864 (dosets, b'sets'),
3876 (dosets, b'sets'),
3865 (domixed, b'mixed'),
3877 (domixed, b'mixed'),
3866 ]
3878 ]
3867 )
3879 )
3868
3880
3869 for fn, title in benches:
3881 for fn, title in benches:
3870 timer, fm = gettimer(ui, opts)
3882 timer, fm = gettimer(ui, opts)
3871 timer(fn, title=title)
3883 timer(fn, title=title)
3872 fm.end()
3884 fm.end()
3873
3885
3874
3886
3875 @command(
3887 @command(
3876 b'perf::write|perfwrite',
3888 b'perf::write|perfwrite',
3877 formatteropts
3889 formatteropts
3878 + [
3890 + [
3879 (b'', b'write-method', b'write', b'ui write method'),
3891 (b'', b'write-method', b'write', b'ui write method'),
3880 (b'', b'nlines', 100, b'number of lines'),
3892 (b'', b'nlines', 100, b'number of lines'),
3881 (b'', b'nitems', 100, b'number of items (per line)'),
3893 (b'', b'nitems', 100, b'number of items (per line)'),
3882 (b'', b'item', b'x', b'item that is written'),
3894 (b'', b'item', b'x', b'item that is written'),
3883 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3895 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3884 (b'', b'flush-line', None, b'flush after each line'),
3896 (b'', b'flush-line', None, b'flush after each line'),
3885 ],
3897 ],
3886 )
3898 )
3887 def perfwrite(ui, repo, **opts):
3899 def perfwrite(ui, repo, **opts):
3888 """microbenchmark ui.write (and others)"""
3900 """microbenchmark ui.write (and others)"""
3889 opts = _byteskwargs(opts)
3901 opts = _byteskwargs(opts)
3890
3902
3891 write = getattr(ui, _sysstr(opts[b'write_method']))
3903 write = getattr(ui, _sysstr(opts[b'write_method']))
3892 nlines = int(opts[b'nlines'])
3904 nlines = int(opts[b'nlines'])
3893 nitems = int(opts[b'nitems'])
3905 nitems = int(opts[b'nitems'])
3894 item = opts[b'item']
3906 item = opts[b'item']
3895 batch_line = opts.get(b'batch_line')
3907 batch_line = opts.get(b'batch_line')
3896 flush_line = opts.get(b'flush_line')
3908 flush_line = opts.get(b'flush_line')
3897
3909
3898 if batch_line:
3910 if batch_line:
3899 line = item * nitems + b'\n'
3911 line = item * nitems + b'\n'
3900
3912
3901 def benchmark():
3913 def benchmark():
3902 for i in pycompat.xrange(nlines):
3914 for i in pycompat.xrange(nlines):
3903 if batch_line:
3915 if batch_line:
3904 write(line)
3916 write(line)
3905 else:
3917 else:
3906 for i in pycompat.xrange(nitems):
3918 for i in pycompat.xrange(nitems):
3907 write(item)
3919 write(item)
3908 write(b'\n')
3920 write(b'\n')
3909 if flush_line:
3921 if flush_line:
3910 ui.flush()
3922 ui.flush()
3911 ui.flush()
3923 ui.flush()
3912
3924
3913 timer, fm = gettimer(ui, opts)
3925 timer, fm = gettimer(ui, opts)
3914 timer(benchmark)
3926 timer(benchmark)
3915 fm.end()
3927 fm.end()
3916
3928
3917
3929
3918 def uisetup(ui):
3930 def uisetup(ui):
3919 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3931 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3920 commands, b'debugrevlogopts'
3932 commands, b'debugrevlogopts'
3921 ):
3933 ):
3922 # for "historical portability":
3934 # for "historical portability":
3923 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3935 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3924 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3936 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3925 # openrevlog() should cause failure, because it has been
3937 # openrevlog() should cause failure, because it has been
3926 # available since 3.5 (or 49c583ca48c4).
3938 # available since 3.5 (or 49c583ca48c4).
3927 def openrevlog(orig, repo, cmd, file_, opts):
3939 def openrevlog(orig, repo, cmd, file_, opts):
3928 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3940 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3929 raise error.Abort(
3941 raise error.Abort(
3930 b"This version doesn't support --dir option",
3942 b"This version doesn't support --dir option",
3931 hint=b"use 3.5 or later",
3943 hint=b"use 3.5 or later",
3932 )
3944 )
3933 return orig(repo, cmd, file_, opts)
3945 return orig(repo, cmd, file_, opts)
3934
3946
3935 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3947 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3936
3948
3937
3949
3938 @command(
3950 @command(
3939 b'perf::progress|perfprogress',
3951 b'perf::progress|perfprogress',
3940 formatteropts
3952 formatteropts
3941 + [
3953 + [
3942 (b'', b'topic', b'topic', b'topic for progress messages'),
3954 (b'', b'topic', b'topic', b'topic for progress messages'),
3943 (b'c', b'total', 1000000, b'total value we are progressing to'),
3955 (b'c', b'total', 1000000, b'total value we are progressing to'),
3944 ],
3956 ],
3945 norepo=True,
3957 norepo=True,
3946 )
3958 )
3947 def perfprogress(ui, topic=None, total=None, **opts):
3959 def perfprogress(ui, topic=None, total=None, **opts):
3948 """printing of progress bars"""
3960 """printing of progress bars"""
3949 opts = _byteskwargs(opts)
3961 opts = _byteskwargs(opts)
3950
3962
3951 timer, fm = gettimer(ui, opts)
3963 timer, fm = gettimer(ui, opts)
3952
3964
3953 def doprogress():
3965 def doprogress():
3954 with ui.makeprogress(topic, total=total) as progress:
3966 with ui.makeprogress(topic, total=total) as progress:
3955 for i in _xrange(total):
3967 for i in _xrange(total):
3956 progress.increment()
3968 progress.increment()
3957
3969
3958 timer(doprogress)
3970 timer(doprogress)
3959 fm.end()
3971 fm.end()
@@ -1,56 +1,57 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 # Undump a dump from dumprevlog
2 # Undump a dump from dumprevlog
3 # $ hg init
3 # $ hg init
4 # $ undumprevlog < repo.dump
4 # $ undumprevlog < repo.dump
5
5
6 from __future__ import absolute_import, print_function
6 from __future__ import absolute_import, print_function
7
7
8 import sys
8 import sys
9 from mercurial.node import bin
9 from mercurial.node import bin
10 from mercurial import (
10 from mercurial import (
11 encoding,
11 encoding,
12 revlog,
12 revlog,
13 transaction,
13 transaction,
14 vfs as vfsmod,
14 vfs as vfsmod,
15 )
15 )
16 from mercurial.utils import procutil
16 from mercurial.utils import procutil
17
17
18 from mercurial.revlogutils import (
18 from mercurial.revlogutils import (
19 constants as revlog_constants,
19 constants as revlog_constants,
20 )
20 )
21
21
22 for fp in (sys.stdin, sys.stdout, sys.stderr):
22 for fp in (sys.stdin, sys.stdout, sys.stderr):
23 procutil.setbinary(fp)
23 procutil.setbinary(fp)
24
24
25 opener = vfsmod.vfs(b'.', False)
25 opener = vfsmod.vfs(b'.', False)
26 tr = transaction.transaction(
26 tr = transaction.transaction(
27 sys.stderr.write, opener, {b'store': opener}, b"undump.journal"
27 sys.stderr.write, opener, {b'store': opener}, b"undump.journal"
28 )
28 )
29 while True:
29 while True:
30 l = sys.stdin.readline()
30 l = sys.stdin.readline()
31 if not l:
31 if not l:
32 break
32 break
33 if l.startswith("file:"):
33 if l.startswith("file:"):
34 f = encoding.strtolocal(l[6:-1])
34 f = encoding.strtolocal(l[6:-1])
35 assert f.endswith(b'.i')
35 r = revlog.revlog(
36 r = revlog.revlog(
36 opener,
37 opener,
37 target=(revlog_constants.KIND_OTHER, b'undump-revlog'),
38 target=(revlog_constants.KIND_OTHER, b'undump-revlog'),
38 indexfile=f,
39 radix=f[:-2],
39 )
40 )
40 procutil.stdout.write(b'%s\n' % f)
41 procutil.stdout.write(b'%s\n' % f)
41 elif l.startswith("node:"):
42 elif l.startswith("node:"):
42 n = bin(l[6:-1])
43 n = bin(l[6:-1])
43 elif l.startswith("linkrev:"):
44 elif l.startswith("linkrev:"):
44 lr = int(l[9:-1])
45 lr = int(l[9:-1])
45 elif l.startswith("parents:"):
46 elif l.startswith("parents:"):
46 p = l[9:-1].split()
47 p = l[9:-1].split()
47 p1 = bin(p[0])
48 p1 = bin(p[0])
48 p2 = bin(p[1])
49 p2 = bin(p[1])
49 elif l.startswith("length:"):
50 elif l.startswith("length:"):
50 length = int(l[8:-1])
51 length = int(l[8:-1])
51 sys.stdin.readline() # start marker
52 sys.stdin.readline() # start marker
52 d = encoding.strtolocal(sys.stdin.read(length))
53 d = encoding.strtolocal(sys.stdin.read(length))
53 sys.stdin.readline() # end marker
54 sys.stdin.readline() # end marker
54 r.addrevision(d, tr, lr, p1, p2)
55 r.addrevision(d, tr, lr, p1, p2)
55
56
56 tr.close()
57 tr.close()
@@ -1,399 +1,399 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import threading
3 import threading
4
4
5 from mercurial.node import (
5 from mercurial.node import (
6 hex,
6 hex,
7 sha1nodeconstants,
7 sha1nodeconstants,
8 )
8 )
9 from mercurial.pycompat import getattr
9 from mercurial.pycompat import getattr
10 from mercurial import (
10 from mercurial import (
11 mdiff,
11 mdiff,
12 pycompat,
12 pycompat,
13 revlog,
13 revlog,
14 )
14 )
15 from . import (
15 from . import (
16 basestore,
16 basestore,
17 constants,
17 constants,
18 shallowutil,
18 shallowutil,
19 )
19 )
20
20
21
21
22 class ChainIndicies(object):
22 class ChainIndicies(object):
23 """A static class for easy reference to the delta chain indicies."""
23 """A static class for easy reference to the delta chain indicies."""
24
24
25 # The filename of this revision delta
25 # The filename of this revision delta
26 NAME = 0
26 NAME = 0
27 # The mercurial file node for this revision delta
27 # The mercurial file node for this revision delta
28 NODE = 1
28 NODE = 1
29 # The filename of the delta base's revision. This is useful when delta
29 # The filename of the delta base's revision. This is useful when delta
30 # between different files (like in the case of a move or copy, we can delta
30 # between different files (like in the case of a move or copy, we can delta
31 # against the original file content).
31 # against the original file content).
32 BASENAME = 2
32 BASENAME = 2
33 # The mercurial file node for the delta base revision. This is the nullid if
33 # The mercurial file node for the delta base revision. This is the nullid if
34 # this delta is a full text.
34 # this delta is a full text.
35 BASENODE = 3
35 BASENODE = 3
36 # The actual delta or full text data.
36 # The actual delta or full text data.
37 DATA = 4
37 DATA = 4
38
38
39
39
40 class unioncontentstore(basestore.baseunionstore):
40 class unioncontentstore(basestore.baseunionstore):
41 def __init__(self, *args, **kwargs):
41 def __init__(self, *args, **kwargs):
42 super(unioncontentstore, self).__init__(*args, **kwargs)
42 super(unioncontentstore, self).__init__(*args, **kwargs)
43
43
44 self.stores = args
44 self.stores = args
45 self.writestore = kwargs.get('writestore')
45 self.writestore = kwargs.get('writestore')
46
46
47 # If allowincomplete==True then the union store can return partial
47 # If allowincomplete==True then the union store can return partial
48 # delta chains, otherwise it will throw a KeyError if a full
48 # delta chains, otherwise it will throw a KeyError if a full
49 # deltachain can't be found.
49 # deltachain can't be found.
50 self.allowincomplete = kwargs.get('allowincomplete', False)
50 self.allowincomplete = kwargs.get('allowincomplete', False)
51
51
52 def get(self, name, node):
52 def get(self, name, node):
53 """Fetches the full text revision contents of the given name+node pair.
53 """Fetches the full text revision contents of the given name+node pair.
54 If the full text doesn't exist, throws a KeyError.
54 If the full text doesn't exist, throws a KeyError.
55
55
56 Under the hood, this uses getdeltachain() across all the stores to build
56 Under the hood, this uses getdeltachain() across all the stores to build
57 up a full chain to produce the full text.
57 up a full chain to produce the full text.
58 """
58 """
59 chain = self.getdeltachain(name, node)
59 chain = self.getdeltachain(name, node)
60
60
61 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
61 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
62 # If we didn't receive a full chain, throw
62 # If we didn't receive a full chain, throw
63 raise KeyError((name, hex(node)))
63 raise KeyError((name, hex(node)))
64
64
65 # The last entry in the chain is a full text, so we start our delta
65 # The last entry in the chain is a full text, so we start our delta
66 # applies with that.
66 # applies with that.
67 fulltext = chain.pop()[ChainIndicies.DATA]
67 fulltext = chain.pop()[ChainIndicies.DATA]
68
68
69 text = fulltext
69 text = fulltext
70 while chain:
70 while chain:
71 delta = chain.pop()[ChainIndicies.DATA]
71 delta = chain.pop()[ChainIndicies.DATA]
72 text = mdiff.patches(text, [delta])
72 text = mdiff.patches(text, [delta])
73
73
74 return text
74 return text
75
75
76 @basestore.baseunionstore.retriable
76 @basestore.baseunionstore.retriable
77 def getdelta(self, name, node):
77 def getdelta(self, name, node):
78 """Return the single delta entry for the given name/node pair."""
78 """Return the single delta entry for the given name/node pair."""
79 for store in self.stores:
79 for store in self.stores:
80 try:
80 try:
81 return store.getdelta(name, node)
81 return store.getdelta(name, node)
82 except KeyError:
82 except KeyError:
83 pass
83 pass
84
84
85 raise KeyError((name, hex(node)))
85 raise KeyError((name, hex(node)))
86
86
87 def getdeltachain(self, name, node):
87 def getdeltachain(self, name, node):
88 """Returns the deltachain for the given name/node pair.
88 """Returns the deltachain for the given name/node pair.
89
89
90 Returns an ordered list of:
90 Returns an ordered list of:
91
91
92 [(name, node, deltabasename, deltabasenode, deltacontent),...]
92 [(name, node, deltabasename, deltabasenode, deltacontent),...]
93
93
94 where the chain is terminated by a full text entry with a nullid
94 where the chain is terminated by a full text entry with a nullid
95 deltabasenode.
95 deltabasenode.
96 """
96 """
97 chain = self._getpartialchain(name, node)
97 chain = self._getpartialchain(name, node)
98 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
98 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
99 x, x, deltabasename, deltabasenode, x = chain[-1]
99 x, x, deltabasename, deltabasenode, x = chain[-1]
100 try:
100 try:
101 morechain = self._getpartialchain(deltabasename, deltabasenode)
101 morechain = self._getpartialchain(deltabasename, deltabasenode)
102 chain.extend(morechain)
102 chain.extend(morechain)
103 except KeyError:
103 except KeyError:
104 # If we allow incomplete chains, don't throw.
104 # If we allow incomplete chains, don't throw.
105 if not self.allowincomplete:
105 if not self.allowincomplete:
106 raise
106 raise
107 break
107 break
108
108
109 return chain
109 return chain
110
110
111 @basestore.baseunionstore.retriable
111 @basestore.baseunionstore.retriable
112 def getmeta(self, name, node):
112 def getmeta(self, name, node):
113 """Returns the metadata dict for given node."""
113 """Returns the metadata dict for given node."""
114 for store in self.stores:
114 for store in self.stores:
115 try:
115 try:
116 return store.getmeta(name, node)
116 return store.getmeta(name, node)
117 except KeyError:
117 except KeyError:
118 pass
118 pass
119 raise KeyError((name, hex(node)))
119 raise KeyError((name, hex(node)))
120
120
121 def getmetrics(self):
121 def getmetrics(self):
122 metrics = [s.getmetrics() for s in self.stores]
122 metrics = [s.getmetrics() for s in self.stores]
123 return shallowutil.sumdicts(*metrics)
123 return shallowutil.sumdicts(*metrics)
124
124
125 @basestore.baseunionstore.retriable
125 @basestore.baseunionstore.retriable
126 def _getpartialchain(self, name, node):
126 def _getpartialchain(self, name, node):
127 """Returns a partial delta chain for the given name/node pair.
127 """Returns a partial delta chain for the given name/node pair.
128
128
129 A partial chain is a chain that may not be terminated in a full-text.
129 A partial chain is a chain that may not be terminated in a full-text.
130 """
130 """
131 for store in self.stores:
131 for store in self.stores:
132 try:
132 try:
133 return store.getdeltachain(name, node)
133 return store.getdeltachain(name, node)
134 except KeyError:
134 except KeyError:
135 pass
135 pass
136
136
137 raise KeyError((name, hex(node)))
137 raise KeyError((name, hex(node)))
138
138
139 def add(self, name, node, data):
139 def add(self, name, node, data):
140 raise RuntimeError(
140 raise RuntimeError(
141 b"cannot add content only to remotefilelog contentstore"
141 b"cannot add content only to remotefilelog contentstore"
142 )
142 )
143
143
144 def getmissing(self, keys):
144 def getmissing(self, keys):
145 missing = keys
145 missing = keys
146 for store in self.stores:
146 for store in self.stores:
147 if missing:
147 if missing:
148 missing = store.getmissing(missing)
148 missing = store.getmissing(missing)
149 return missing
149 return missing
150
150
151 def addremotefilelognode(self, name, node, data):
151 def addremotefilelognode(self, name, node, data):
152 if self.writestore:
152 if self.writestore:
153 self.writestore.addremotefilelognode(name, node, data)
153 self.writestore.addremotefilelognode(name, node, data)
154 else:
154 else:
155 raise RuntimeError(b"no writable store configured")
155 raise RuntimeError(b"no writable store configured")
156
156
157 def markledger(self, ledger, options=None):
157 def markledger(self, ledger, options=None):
158 for store in self.stores:
158 for store in self.stores:
159 store.markledger(ledger, options)
159 store.markledger(ledger, options)
160
160
161
161
162 class remotefilelogcontentstore(basestore.basestore):
162 class remotefilelogcontentstore(basestore.basestore):
163 def __init__(self, *args, **kwargs):
163 def __init__(self, *args, **kwargs):
164 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
164 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
165 self._threaddata = threading.local()
165 self._threaddata = threading.local()
166
166
167 def get(self, name, node):
167 def get(self, name, node):
168 # return raw revision text
168 # return raw revision text
169 data = self._getdata(name, node)
169 data = self._getdata(name, node)
170
170
171 offset, size, flags = shallowutil.parsesizeflags(data)
171 offset, size, flags = shallowutil.parsesizeflags(data)
172 content = data[offset : offset + size]
172 content = data[offset : offset + size]
173
173
174 ancestormap = shallowutil.ancestormap(data)
174 ancestormap = shallowutil.ancestormap(data)
175 p1, p2, linknode, copyfrom = ancestormap[node]
175 p1, p2, linknode, copyfrom = ancestormap[node]
176 copyrev = None
176 copyrev = None
177 if copyfrom:
177 if copyfrom:
178 copyrev = hex(p1)
178 copyrev = hex(p1)
179
179
180 self._updatemetacache(node, size, flags)
180 self._updatemetacache(node, size, flags)
181
181
182 # lfs tracks renames in its own metadata, remove hg copy metadata,
182 # lfs tracks renames in its own metadata, remove hg copy metadata,
183 # because copy metadata will be re-added by lfs flag processor.
183 # because copy metadata will be re-added by lfs flag processor.
184 if flags & revlog.REVIDX_EXTSTORED:
184 if flags & revlog.REVIDX_EXTSTORED:
185 copyrev = copyfrom = None
185 copyrev = copyfrom = None
186 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
186 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
187 return revision
187 return revision
188
188
189 def getdelta(self, name, node):
189 def getdelta(self, name, node):
190 # Since remotefilelog content stores only contain full texts, just
190 # Since remotefilelog content stores only contain full texts, just
191 # return that.
191 # return that.
192 revision = self.get(name, node)
192 revision = self.get(name, node)
193 return (
193 return (
194 revision,
194 revision,
195 name,
195 name,
196 sha1nodeconstants.nullid,
196 sha1nodeconstants.nullid,
197 self.getmeta(name, node),
197 self.getmeta(name, node),
198 )
198 )
199
199
200 def getdeltachain(self, name, node):
200 def getdeltachain(self, name, node):
201 # Since remotefilelog content stores just contain full texts, we return
201 # Since remotefilelog content stores just contain full texts, we return
202 # a fake delta chain that just consists of a single full text revision.
202 # a fake delta chain that just consists of a single full text revision.
203 # The nullid in the deltabasenode slot indicates that the revision is a
203 # The nullid in the deltabasenode slot indicates that the revision is a
204 # fulltext.
204 # fulltext.
205 revision = self.get(name, node)
205 revision = self.get(name, node)
206 return [(name, node, None, sha1nodeconstants.nullid, revision)]
206 return [(name, node, None, sha1nodeconstants.nullid, revision)]
207
207
208 def getmeta(self, name, node):
208 def getmeta(self, name, node):
209 self._sanitizemetacache()
209 self._sanitizemetacache()
210 if node != self._threaddata.metacache[0]:
210 if node != self._threaddata.metacache[0]:
211 data = self._getdata(name, node)
211 data = self._getdata(name, node)
212 offset, size, flags = shallowutil.parsesizeflags(data)
212 offset, size, flags = shallowutil.parsesizeflags(data)
213 self._updatemetacache(node, size, flags)
213 self._updatemetacache(node, size, flags)
214 return self._threaddata.metacache[1]
214 return self._threaddata.metacache[1]
215
215
216 def add(self, name, node, data):
216 def add(self, name, node, data):
217 raise RuntimeError(
217 raise RuntimeError(
218 b"cannot add content only to remotefilelog contentstore"
218 b"cannot add content only to remotefilelog contentstore"
219 )
219 )
220
220
221 def _sanitizemetacache(self):
221 def _sanitizemetacache(self):
222 metacache = getattr(self._threaddata, 'metacache', None)
222 metacache = getattr(self._threaddata, 'metacache', None)
223 if metacache is None:
223 if metacache is None:
224 self._threaddata.metacache = (None, None) # (node, meta)
224 self._threaddata.metacache = (None, None) # (node, meta)
225
225
226 def _updatemetacache(self, node, size, flags):
226 def _updatemetacache(self, node, size, flags):
227 self._sanitizemetacache()
227 self._sanitizemetacache()
228 if node == self._threaddata.metacache[0]:
228 if node == self._threaddata.metacache[0]:
229 return
229 return
230 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
230 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
231 self._threaddata.metacache = (node, meta)
231 self._threaddata.metacache = (node, meta)
232
232
233
233
234 class remotecontentstore(object):
234 class remotecontentstore(object):
235 def __init__(self, ui, fileservice, shared):
235 def __init__(self, ui, fileservice, shared):
236 self._fileservice = fileservice
236 self._fileservice = fileservice
237 # type(shared) is usually remotefilelogcontentstore
237 # type(shared) is usually remotefilelogcontentstore
238 self._shared = shared
238 self._shared = shared
239
239
240 def get(self, name, node):
240 def get(self, name, node):
241 self._fileservice.prefetch(
241 self._fileservice.prefetch(
242 [(name, hex(node))], force=True, fetchdata=True
242 [(name, hex(node))], force=True, fetchdata=True
243 )
243 )
244 return self._shared.get(name, node)
244 return self._shared.get(name, node)
245
245
246 def getdelta(self, name, node):
246 def getdelta(self, name, node):
247 revision = self.get(name, node)
247 revision = self.get(name, node)
248 return (
248 return (
249 revision,
249 revision,
250 name,
250 name,
251 sha1nodeconstants.nullid,
251 sha1nodeconstants.nullid,
252 self._shared.getmeta(name, node),
252 self._shared.getmeta(name, node),
253 )
253 )
254
254
255 def getdeltachain(self, name, node):
255 def getdeltachain(self, name, node):
256 # Since our remote content stores just contain full texts, we return a
256 # Since our remote content stores just contain full texts, we return a
257 # fake delta chain that just consists of a single full text revision.
257 # fake delta chain that just consists of a single full text revision.
258 # The nullid in the deltabasenode slot indicates that the revision is a
258 # The nullid in the deltabasenode slot indicates that the revision is a
259 # fulltext.
259 # fulltext.
260 revision = self.get(name, node)
260 revision = self.get(name, node)
261 return [(name, node, None, sha1nodeconstants.nullid, revision)]
261 return [(name, node, None, sha1nodeconstants.nullid, revision)]
262
262
263 def getmeta(self, name, node):
263 def getmeta(self, name, node):
264 self._fileservice.prefetch(
264 self._fileservice.prefetch(
265 [(name, hex(node))], force=True, fetchdata=True
265 [(name, hex(node))], force=True, fetchdata=True
266 )
266 )
267 return self._shared.getmeta(name, node)
267 return self._shared.getmeta(name, node)
268
268
269 def add(self, name, node, data):
269 def add(self, name, node, data):
270 raise RuntimeError(b"cannot add to a remote store")
270 raise RuntimeError(b"cannot add to a remote store")
271
271
272 def getmissing(self, keys):
272 def getmissing(self, keys):
273 return keys
273 return keys
274
274
275 def markledger(self, ledger, options=None):
275 def markledger(self, ledger, options=None):
276 pass
276 pass
277
277
278
278
279 class manifestrevlogstore(object):
279 class manifestrevlogstore(object):
280 def __init__(self, repo):
280 def __init__(self, repo):
281 self._store = repo.store
281 self._store = repo.store
282 self._svfs = repo.svfs
282 self._svfs = repo.svfs
283 self._revlogs = dict()
283 self._revlogs = dict()
284 self._cl = revlog.revlog(self._svfs, indexfile=b'00changelog.i')
284 self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
285 self._repackstartlinkrev = 0
285 self._repackstartlinkrev = 0
286
286
287 def get(self, name, node):
287 def get(self, name, node):
288 return self._revlog(name).rawdata(node)
288 return self._revlog(name).rawdata(node)
289
289
290 def getdelta(self, name, node):
290 def getdelta(self, name, node):
291 revision = self.get(name, node)
291 revision = self.get(name, node)
292 return revision, name, self._cl.nullid, self.getmeta(name, node)
292 return revision, name, self._cl.nullid, self.getmeta(name, node)
293
293
294 def getdeltachain(self, name, node):
294 def getdeltachain(self, name, node):
295 revision = self.get(name, node)
295 revision = self.get(name, node)
296 return [(name, node, None, self._cl.nullid, revision)]
296 return [(name, node, None, self._cl.nullid, revision)]
297
297
298 def getmeta(self, name, node):
298 def getmeta(self, name, node):
299 rl = self._revlog(name)
299 rl = self._revlog(name)
300 rev = rl.rev(node)
300 rev = rl.rev(node)
301 return {
301 return {
302 constants.METAKEYFLAG: rl.flags(rev),
302 constants.METAKEYFLAG: rl.flags(rev),
303 constants.METAKEYSIZE: rl.rawsize(rev),
303 constants.METAKEYSIZE: rl.rawsize(rev),
304 }
304 }
305
305
306 def getancestors(self, name, node, known=None):
306 def getancestors(self, name, node, known=None):
307 if known is None:
307 if known is None:
308 known = set()
308 known = set()
309 if node in known:
309 if node in known:
310 return []
310 return []
311
311
312 rl = self._revlog(name)
312 rl = self._revlog(name)
313 ancestors = {}
313 ancestors = {}
314 missing = {node}
314 missing = {node}
315 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
315 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
316 ancnode = rl.node(ancrev)
316 ancnode = rl.node(ancrev)
317 missing.discard(ancnode)
317 missing.discard(ancnode)
318
318
319 p1, p2 = rl.parents(ancnode)
319 p1, p2 = rl.parents(ancnode)
320 if p1 != self._cl.nullid and p1 not in known:
320 if p1 != self._cl.nullid and p1 not in known:
321 missing.add(p1)
321 missing.add(p1)
322 if p2 != self._cl.nullid and p2 not in known:
322 if p2 != self._cl.nullid and p2 not in known:
323 missing.add(p2)
323 missing.add(p2)
324
324
325 linknode = self._cl.node(rl.linkrev(ancrev))
325 linknode = self._cl.node(rl.linkrev(ancrev))
326 ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
326 ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
327 if not missing:
327 if not missing:
328 break
328 break
329 return ancestors
329 return ancestors
330
330
331 def getnodeinfo(self, name, node):
331 def getnodeinfo(self, name, node):
332 cl = self._cl
332 cl = self._cl
333 rl = self._revlog(name)
333 rl = self._revlog(name)
334 parents = rl.parents(node)
334 parents = rl.parents(node)
335 linkrev = rl.linkrev(rl.rev(node))
335 linkrev = rl.linkrev(rl.rev(node))
336 return (parents[0], parents[1], cl.node(linkrev), None)
336 return (parents[0], parents[1], cl.node(linkrev), None)
337
337
338 def add(self, *args):
338 def add(self, *args):
339 raise RuntimeError(b"cannot add to a revlog store")
339 raise RuntimeError(b"cannot add to a revlog store")
340
340
341 def _revlog(self, name):
341 def _revlog(self, name):
342 rl = self._revlogs.get(name)
342 rl = self._revlogs.get(name)
343 if rl is None:
343 if rl is None:
344 revlogname = b'00manifesttree.i'
344 revlogname = b'00manifesttree'
345 if name != b'':
345 if name != b'':
346 revlogname = b'meta/%s/00manifest.i' % name
346 revlogname = b'meta/%s/00manifest' % name
347 rl = revlog.revlog(self._svfs, indexfile=revlogname)
347 rl = revlog.revlog(self._svfs, radix=revlogname)
348 self._revlogs[name] = rl
348 self._revlogs[name] = rl
349 return rl
349 return rl
350
350
351 def getmissing(self, keys):
351 def getmissing(self, keys):
352 missing = []
352 missing = []
353 for name, node in keys:
353 for name, node in keys:
354 mfrevlog = self._revlog(name)
354 mfrevlog = self._revlog(name)
355 if node not in mfrevlog.nodemap:
355 if node not in mfrevlog.nodemap:
356 missing.append((name, node))
356 missing.append((name, node))
357
357
358 return missing
358 return missing
359
359
360 def setrepacklinkrevrange(self, startrev, endrev):
360 def setrepacklinkrevrange(self, startrev, endrev):
361 self._repackstartlinkrev = startrev
361 self._repackstartlinkrev = startrev
362 self._repackendlinkrev = endrev
362 self._repackendlinkrev = endrev
363
363
364 def markledger(self, ledger, options=None):
364 def markledger(self, ledger, options=None):
365 if options and options.get(constants.OPTION_PACKSONLY):
365 if options and options.get(constants.OPTION_PACKSONLY):
366 return
366 return
367 treename = b''
367 treename = b''
368 rl = revlog.revlog(self._svfs, indexfile=b'00manifesttree.i')
368 rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
369 startlinkrev = self._repackstartlinkrev
369 startlinkrev = self._repackstartlinkrev
370 endlinkrev = self._repackendlinkrev
370 endlinkrev = self._repackendlinkrev
371 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
371 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
372 linkrev = rl.linkrev(rev)
372 linkrev = rl.linkrev(rev)
373 if linkrev < startlinkrev:
373 if linkrev < startlinkrev:
374 break
374 break
375 if linkrev > endlinkrev:
375 if linkrev > endlinkrev:
376 continue
376 continue
377 node = rl.node(rev)
377 node = rl.node(rev)
378 ledger.markdataentry(self, treename, node)
378 ledger.markdataentry(self, treename, node)
379 ledger.markhistoryentry(self, treename, node)
379 ledger.markhistoryentry(self, treename, node)
380
380
381 for t, path, encoded, size in self._store.datafiles():
381 for t, path, encoded, size in self._store.datafiles():
382 if path[:5] != b'meta/' or path[-2:] != b'.i':
382 if path[:5] != b'meta/' or path[-2:] != b'.i':
383 continue
383 continue
384
384
385 treename = path[5 : -len(b'/00manifest.i')]
385 treename = path[5 : -len(b'/00manifest')]
386
386
387 rl = revlog.revlog(self._svfs, indexfile=path)
387 rl = revlog.revlog(self._svfs, indexfile=path[:-2])
388 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
388 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
389 linkrev = rl.linkrev(rev)
389 linkrev = rl.linkrev(rev)
390 if linkrev < startlinkrev:
390 if linkrev < startlinkrev:
391 break
391 break
392 if linkrev > endlinkrev:
392 if linkrev > endlinkrev:
393 continue
393 continue
394 node = rl.node(rev)
394 node = rl.node(rev)
395 ledger.markdataentry(self, treename, node)
395 ledger.markdataentry(self, treename, node)
396 ledger.markhistoryentry(self, treename, node)
396 ledger.markhistoryentry(self, treename, node)
397
397
398 def cleanup(self, ledger):
398 def cleanup(self, ledger):
399 pass
399 pass
@@ -1,714 +1,714 b''
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 bundle2,
26 bundle2,
27 changegroup,
27 changegroup,
28 changelog,
28 changelog,
29 cmdutil,
29 cmdutil,
30 discovery,
30 discovery,
31 encoding,
31 encoding,
32 error,
32 error,
33 exchange,
33 exchange,
34 filelog,
34 filelog,
35 localrepo,
35 localrepo,
36 manifest,
36 manifest,
37 mdiff,
37 mdiff,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 revlog,
41 revlog,
42 util,
42 util,
43 vfs as vfsmod,
43 vfs as vfsmod,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 urlutil,
46 urlutil,
47 )
47 )
48
48
49 from .revlogutils import (
49 from .revlogutils import (
50 constants as revlog_constants,
50 constants as revlog_constants,
51 )
51 )
52
52
53
53
54 class bundlerevlog(revlog.revlog):
54 class bundlerevlog(revlog.revlog):
55 def __init__(self, opener, target, indexfile, cgunpacker, linkmapper):
55 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
56 # How it works:
56 # How it works:
57 # To retrieve a revision, we need to know the offset of the revision in
57 # To retrieve a revision, we need to know the offset of the revision in
58 # the bundle (an unbundle object). We store this offset in the index
58 # the bundle (an unbundle object). We store this offset in the index
59 # (start). The base of the delta is stored in the base field.
59 # (start). The base of the delta is stored in the base field.
60 #
60 #
61 # To differentiate a rev in the bundle from a rev in the revlog, we
61 # To differentiate a rev in the bundle from a rev in the revlog, we
62 # check revision against repotiprev.
62 # check revision against repotiprev.
63 opener = vfsmod.readonlyvfs(opener)
63 opener = vfsmod.readonlyvfs(opener)
64 revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
64 revlog.revlog.__init__(self, opener, target=target, radix=radix)
65 self.bundle = cgunpacker
65 self.bundle = cgunpacker
66 n = len(self)
66 n = len(self)
67 self.repotiprev = n - 1
67 self.repotiprev = n - 1
68 self.bundlerevs = set() # used by 'bundle()' revset expression
68 self.bundlerevs = set() # used by 'bundle()' revset expression
69 for deltadata in cgunpacker.deltaiter():
69 for deltadata in cgunpacker.deltaiter():
70 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
70 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
71
71
72 size = len(delta)
72 size = len(delta)
73 start = cgunpacker.tell() - size
73 start = cgunpacker.tell() - size
74
74
75 if self.index.has_node(node):
75 if self.index.has_node(node):
76 # this can happen if two branches make the same change
76 # this can happen if two branches make the same change
77 self.bundlerevs.add(self.index.rev(node))
77 self.bundlerevs.add(self.index.rev(node))
78 continue
78 continue
79 if cs == node:
79 if cs == node:
80 linkrev = nullrev
80 linkrev = nullrev
81 else:
81 else:
82 linkrev = linkmapper(cs)
82 linkrev = linkmapper(cs)
83
83
84 for p in (p1, p2):
84 for p in (p1, p2):
85 if not self.index.has_node(p):
85 if not self.index.has_node(p):
86 raise error.LookupError(
86 raise error.LookupError(
87 p, self._indexfile, _(b"unknown parent")
87 p, self._indexfile, _(b"unknown parent")
88 )
88 )
89
89
90 if not self.index.has_node(deltabase):
90 if not self.index.has_node(deltabase):
91 raise LookupError(
91 raise LookupError(
92 deltabase, self._indexfile, _(b'unknown delta base')
92 deltabase, self._indexfile, _(b'unknown delta base')
93 )
93 )
94
94
95 baserev = self.rev(deltabase)
95 baserev = self.rev(deltabase)
96 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
96 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
97 e = (
97 e = (
98 revlog.offset_type(start, flags),
98 revlog.offset_type(start, flags),
99 size,
99 size,
100 -1,
100 -1,
101 baserev,
101 baserev,
102 linkrev,
102 linkrev,
103 self.rev(p1),
103 self.rev(p1),
104 self.rev(p2),
104 self.rev(p2),
105 node,
105 node,
106 0,
106 0,
107 0,
107 0,
108 )
108 )
109 self.index.append(e)
109 self.index.append(e)
110 self.bundlerevs.add(n)
110 self.bundlerevs.add(n)
111 n += 1
111 n += 1
112
112
113 def _chunk(self, rev, df=None):
113 def _chunk(self, rev, df=None):
114 # Warning: in case of bundle, the diff is against what we stored as
114 # Warning: in case of bundle, the diff is against what we stored as
115 # delta base, not against rev - 1
115 # delta base, not against rev - 1
116 # XXX: could use some caching
116 # XXX: could use some caching
117 if rev <= self.repotiprev:
117 if rev <= self.repotiprev:
118 return revlog.revlog._chunk(self, rev)
118 return revlog.revlog._chunk(self, rev)
119 self.bundle.seek(self.start(rev))
119 self.bundle.seek(self.start(rev))
120 return self.bundle.read(self.length(rev))
120 return self.bundle.read(self.length(rev))
121
121
122 def revdiff(self, rev1, rev2):
122 def revdiff(self, rev1, rev2):
123 """return or calculate a delta between two revisions"""
123 """return or calculate a delta between two revisions"""
124 if rev1 > self.repotiprev and rev2 > self.repotiprev:
124 if rev1 > self.repotiprev and rev2 > self.repotiprev:
125 # hot path for bundle
125 # hot path for bundle
126 revb = self.index[rev2][3]
126 revb = self.index[rev2][3]
127 if revb == rev1:
127 if revb == rev1:
128 return self._chunk(rev2)
128 return self._chunk(rev2)
129 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
129 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
130 return revlog.revlog.revdiff(self, rev1, rev2)
130 return revlog.revlog.revdiff(self, rev1, rev2)
131
131
132 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
132 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
133
133
134 def _rawtext(self, node, rev, _df=None):
134 def _rawtext(self, node, rev, _df=None):
135 if rev is None:
135 if rev is None:
136 rev = self.rev(node)
136 rev = self.rev(node)
137 validated = False
137 validated = False
138 rawtext = None
138 rawtext = None
139 chain = []
139 chain = []
140 iterrev = rev
140 iterrev = rev
141 # reconstruct the revision if it is from a changegroup
141 # reconstruct the revision if it is from a changegroup
142 while iterrev > self.repotiprev:
142 while iterrev > self.repotiprev:
143 if self._revisioncache and self._revisioncache[1] == iterrev:
143 if self._revisioncache and self._revisioncache[1] == iterrev:
144 rawtext = self._revisioncache[2]
144 rawtext = self._revisioncache[2]
145 break
145 break
146 chain.append(iterrev)
146 chain.append(iterrev)
147 iterrev = self.index[iterrev][3]
147 iterrev = self.index[iterrev][3]
148 if iterrev == nullrev:
148 if iterrev == nullrev:
149 rawtext = b''
149 rawtext = b''
150 elif rawtext is None:
150 elif rawtext is None:
151 r = super(bundlerevlog, self)._rawtext(
151 r = super(bundlerevlog, self)._rawtext(
152 self.node(iterrev), iterrev, _df=_df
152 self.node(iterrev), iterrev, _df=_df
153 )
153 )
154 __, rawtext, validated = r
154 __, rawtext, validated = r
155 if chain:
155 if chain:
156 validated = False
156 validated = False
157 while chain:
157 while chain:
158 delta = self._chunk(chain.pop())
158 delta = self._chunk(chain.pop())
159 rawtext = mdiff.patches(rawtext, [delta])
159 rawtext = mdiff.patches(rawtext, [delta])
160 return rev, rawtext, validated
160 return rev, rawtext, validated
161
161
162 def addrevision(self, *args, **kwargs):
162 def addrevision(self, *args, **kwargs):
163 raise NotImplementedError
163 raise NotImplementedError
164
164
165 def addgroup(self, *args, **kwargs):
165 def addgroup(self, *args, **kwargs):
166 raise NotImplementedError
166 raise NotImplementedError
167
167
168 def strip(self, *args, **kwargs):
168 def strip(self, *args, **kwargs):
169 raise NotImplementedError
169 raise NotImplementedError
170
170
171 def checksize(self):
171 def checksize(self):
172 raise NotImplementedError
172 raise NotImplementedError
173
173
174
174
175 class bundlechangelog(bundlerevlog, changelog.changelog):
175 class bundlechangelog(bundlerevlog, changelog.changelog):
176 def __init__(self, opener, cgunpacker):
176 def __init__(self, opener, cgunpacker):
177 changelog.changelog.__init__(self, opener)
177 changelog.changelog.__init__(self, opener)
178 linkmapper = lambda x: x
178 linkmapper = lambda x: x
179 bundlerevlog.__init__(
179 bundlerevlog.__init__(
180 self,
180 self,
181 opener,
181 opener,
182 (revlog_constants.KIND_CHANGELOG, None),
182 (revlog_constants.KIND_CHANGELOG, None),
183 self._indexfile,
183 self.radix,
184 cgunpacker,
184 cgunpacker,
185 linkmapper,
185 linkmapper,
186 )
186 )
187
187
188
188
189 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
189 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
190 def __init__(
190 def __init__(
191 self,
191 self,
192 nodeconstants,
192 nodeconstants,
193 opener,
193 opener,
194 cgunpacker,
194 cgunpacker,
195 linkmapper,
195 linkmapper,
196 dirlogstarts=None,
196 dirlogstarts=None,
197 dir=b'',
197 dir=b'',
198 ):
198 ):
199 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
199 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
200 bundlerevlog.__init__(
200 bundlerevlog.__init__(
201 self,
201 self,
202 opener,
202 opener,
203 (revlog_constants.KIND_MANIFESTLOG, dir),
203 (revlog_constants.KIND_MANIFESTLOG, dir),
204 self._revlog._indexfile,
204 self._revlog.radix,
205 cgunpacker,
205 cgunpacker,
206 linkmapper,
206 linkmapper,
207 )
207 )
208 if dirlogstarts is None:
208 if dirlogstarts is None:
209 dirlogstarts = {}
209 dirlogstarts = {}
210 if self.bundle.version == b"03":
210 if self.bundle.version == b"03":
211 dirlogstarts = _getfilestarts(self.bundle)
211 dirlogstarts = _getfilestarts(self.bundle)
212 self._dirlogstarts = dirlogstarts
212 self._dirlogstarts = dirlogstarts
213 self._linkmapper = linkmapper
213 self._linkmapper = linkmapper
214
214
215 def dirlog(self, d):
215 def dirlog(self, d):
216 if d in self._dirlogstarts:
216 if d in self._dirlogstarts:
217 self.bundle.seek(self._dirlogstarts[d])
217 self.bundle.seek(self._dirlogstarts[d])
218 return bundlemanifest(
218 return bundlemanifest(
219 self.nodeconstants,
219 self.nodeconstants,
220 self.opener,
220 self.opener,
221 self.bundle,
221 self.bundle,
222 self._linkmapper,
222 self._linkmapper,
223 self._dirlogstarts,
223 self._dirlogstarts,
224 dir=d,
224 dir=d,
225 )
225 )
226 return super(bundlemanifest, self).dirlog(d)
226 return super(bundlemanifest, self).dirlog(d)
227
227
228
228
229 class bundlefilelog(filelog.filelog):
229 class bundlefilelog(filelog.filelog):
230 def __init__(self, opener, path, cgunpacker, linkmapper):
230 def __init__(self, opener, path, cgunpacker, linkmapper):
231 filelog.filelog.__init__(self, opener, path)
231 filelog.filelog.__init__(self, opener, path)
232 self._revlog = bundlerevlog(
232 self._revlog = bundlerevlog(
233 opener,
233 opener,
234 # XXX should use the unencoded path
234 # XXX should use the unencoded path
235 target=(revlog_constants.KIND_FILELOG, path),
235 target=(revlog_constants.KIND_FILELOG, path),
236 indexfile=self._revlog._indexfile,
236 radix=self._revlog.radix,
237 cgunpacker=cgunpacker,
237 cgunpacker=cgunpacker,
238 linkmapper=linkmapper,
238 linkmapper=linkmapper,
239 )
239 )
240
240
241
241
242 class bundlepeer(localrepo.localpeer):
242 class bundlepeer(localrepo.localpeer):
243 def canpush(self):
243 def canpush(self):
244 return False
244 return False
245
245
246
246
247 class bundlephasecache(phases.phasecache):
247 class bundlephasecache(phases.phasecache):
248 def __init__(self, *args, **kwargs):
248 def __init__(self, *args, **kwargs):
249 super(bundlephasecache, self).__init__(*args, **kwargs)
249 super(bundlephasecache, self).__init__(*args, **kwargs)
250 if util.safehasattr(self, 'opener'):
250 if util.safehasattr(self, 'opener'):
251 self.opener = vfsmod.readonlyvfs(self.opener)
251 self.opener = vfsmod.readonlyvfs(self.opener)
252
252
253 def write(self):
253 def write(self):
254 raise NotImplementedError
254 raise NotImplementedError
255
255
256 def _write(self, fp):
256 def _write(self, fp):
257 raise NotImplementedError
257 raise NotImplementedError
258
258
259 def _updateroots(self, phase, newroots, tr):
259 def _updateroots(self, phase, newroots, tr):
260 self.phaseroots[phase] = newroots
260 self.phaseroots[phase] = newroots
261 self.invalidate()
261 self.invalidate()
262 self.dirty = True
262 self.dirty = True
263
263
264
264
265 def _getfilestarts(cgunpacker):
265 def _getfilestarts(cgunpacker):
266 filespos = {}
266 filespos = {}
267 for chunkdata in iter(cgunpacker.filelogheader, {}):
267 for chunkdata in iter(cgunpacker.filelogheader, {}):
268 fname = chunkdata[b'filename']
268 fname = chunkdata[b'filename']
269 filespos[fname] = cgunpacker.tell()
269 filespos[fname] = cgunpacker.tell()
270 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
270 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
271 pass
271 pass
272 return filespos
272 return filespos
273
273
274
274
275 class bundlerepository(object):
275 class bundlerepository(object):
276 """A repository instance that is a union of a local repo and a bundle.
276 """A repository instance that is a union of a local repo and a bundle.
277
277
278 Instances represent a read-only repository composed of a local repository
278 Instances represent a read-only repository composed of a local repository
279 with the contents of a bundle file applied. The repository instance is
279 with the contents of a bundle file applied. The repository instance is
280 conceptually similar to the state of a repository after an
280 conceptually similar to the state of a repository after an
281 ``hg unbundle`` operation. However, the contents of the bundle are never
281 ``hg unbundle`` operation. However, the contents of the bundle are never
282 applied to the actual base repository.
282 applied to the actual base repository.
283
283
284 Instances constructed directly are not usable as repository objects.
284 Instances constructed directly are not usable as repository objects.
285 Use instance() or makebundlerepository() to create instances.
285 Use instance() or makebundlerepository() to create instances.
286 """
286 """
287
287
288 def __init__(self, bundlepath, url, tempparent):
288 def __init__(self, bundlepath, url, tempparent):
289 self._tempparent = tempparent
289 self._tempparent = tempparent
290 self._url = url
290 self._url = url
291
291
292 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
292 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
293
293
294 self.tempfile = None
294 self.tempfile = None
295 f = util.posixfile(bundlepath, b"rb")
295 f = util.posixfile(bundlepath, b"rb")
296 bundle = exchange.readbundle(self.ui, f, bundlepath)
296 bundle = exchange.readbundle(self.ui, f, bundlepath)
297
297
298 if isinstance(bundle, bundle2.unbundle20):
298 if isinstance(bundle, bundle2.unbundle20):
299 self._bundlefile = bundle
299 self._bundlefile = bundle
300 self._cgunpacker = None
300 self._cgunpacker = None
301
301
302 cgpart = None
302 cgpart = None
303 for part in bundle.iterparts(seekable=True):
303 for part in bundle.iterparts(seekable=True):
304 if part.type == b'changegroup':
304 if part.type == b'changegroup':
305 if cgpart:
305 if cgpart:
306 raise NotImplementedError(
306 raise NotImplementedError(
307 b"can't process multiple changegroups"
307 b"can't process multiple changegroups"
308 )
308 )
309 cgpart = part
309 cgpart = part
310
310
311 self._handlebundle2part(bundle, part)
311 self._handlebundle2part(bundle, part)
312
312
313 if not cgpart:
313 if not cgpart:
314 raise error.Abort(_(b"No changegroups found"))
314 raise error.Abort(_(b"No changegroups found"))
315
315
316 # This is required to placate a later consumer, which expects
316 # This is required to placate a later consumer, which expects
317 # the payload offset to be at the beginning of the changegroup.
317 # the payload offset to be at the beginning of the changegroup.
318 # We need to do this after the iterparts() generator advances
318 # We need to do this after the iterparts() generator advances
319 # because iterparts() will seek to end of payload after the
319 # because iterparts() will seek to end of payload after the
320 # generator returns control to iterparts().
320 # generator returns control to iterparts().
321 cgpart.seek(0, os.SEEK_SET)
321 cgpart.seek(0, os.SEEK_SET)
322
322
323 elif isinstance(bundle, changegroup.cg1unpacker):
323 elif isinstance(bundle, changegroup.cg1unpacker):
324 if bundle.compressed():
324 if bundle.compressed():
325 f = self._writetempbundle(
325 f = self._writetempbundle(
326 bundle.read, b'.hg10un', header=b'HG10UN'
326 bundle.read, b'.hg10un', header=b'HG10UN'
327 )
327 )
328 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
328 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
329
329
330 self._bundlefile = bundle
330 self._bundlefile = bundle
331 self._cgunpacker = bundle
331 self._cgunpacker = bundle
332 else:
332 else:
333 raise error.Abort(
333 raise error.Abort(
334 _(b'bundle type %s cannot be read') % type(bundle)
334 _(b'bundle type %s cannot be read') % type(bundle)
335 )
335 )
336
336
337 # dict with the mapping 'filename' -> position in the changegroup.
337 # dict with the mapping 'filename' -> position in the changegroup.
338 self._cgfilespos = {}
338 self._cgfilespos = {}
339
339
340 self.firstnewrev = self.changelog.repotiprev + 1
340 self.firstnewrev = self.changelog.repotiprev + 1
341 phases.retractboundary(
341 phases.retractboundary(
342 self,
342 self,
343 None,
343 None,
344 phases.draft,
344 phases.draft,
345 [ctx.node() for ctx in self[self.firstnewrev :]],
345 [ctx.node() for ctx in self[self.firstnewrev :]],
346 )
346 )
347
347
348 def _handlebundle2part(self, bundle, part):
348 def _handlebundle2part(self, bundle, part):
349 if part.type != b'changegroup':
349 if part.type != b'changegroup':
350 return
350 return
351
351
352 cgstream = part
352 cgstream = part
353 version = part.params.get(b'version', b'01')
353 version = part.params.get(b'version', b'01')
354 legalcgvers = changegroup.supportedincomingversions(self)
354 legalcgvers = changegroup.supportedincomingversions(self)
355 if version not in legalcgvers:
355 if version not in legalcgvers:
356 msg = _(b'Unsupported changegroup version: %s')
356 msg = _(b'Unsupported changegroup version: %s')
357 raise error.Abort(msg % version)
357 raise error.Abort(msg % version)
358 if bundle.compressed():
358 if bundle.compressed():
359 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
359 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
360
360
361 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
361 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
362
362
363 def _writetempbundle(self, readfn, suffix, header=b''):
363 def _writetempbundle(self, readfn, suffix, header=b''):
364 """Write a temporary file to disk"""
364 """Write a temporary file to disk"""
365 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
365 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
366 self.tempfile = temp
366 self.tempfile = temp
367
367
368 with os.fdopen(fdtemp, 'wb') as fptemp:
368 with os.fdopen(fdtemp, 'wb') as fptemp:
369 fptemp.write(header)
369 fptemp.write(header)
370 while True:
370 while True:
371 chunk = readfn(2 ** 18)
371 chunk = readfn(2 ** 18)
372 if not chunk:
372 if not chunk:
373 break
373 break
374 fptemp.write(chunk)
374 fptemp.write(chunk)
375
375
376 return self.vfs.open(self.tempfile, mode=b"rb")
376 return self.vfs.open(self.tempfile, mode=b"rb")
377
377
378 @localrepo.unfilteredpropertycache
378 @localrepo.unfilteredpropertycache
379 def _phasecache(self):
379 def _phasecache(self):
380 return bundlephasecache(self, self._phasedefaults)
380 return bundlephasecache(self, self._phasedefaults)
381
381
382 @localrepo.unfilteredpropertycache
382 @localrepo.unfilteredpropertycache
383 def changelog(self):
383 def changelog(self):
384 # consume the header if it exists
384 # consume the header if it exists
385 self._cgunpacker.changelogheader()
385 self._cgunpacker.changelogheader()
386 c = bundlechangelog(self.svfs, self._cgunpacker)
386 c = bundlechangelog(self.svfs, self._cgunpacker)
387 self.manstart = self._cgunpacker.tell()
387 self.manstart = self._cgunpacker.tell()
388 return c
388 return c
389
389
390 def _refreshchangelog(self):
390 def _refreshchangelog(self):
391 # changelog for bundle repo are not filecache, this method is not
391 # changelog for bundle repo are not filecache, this method is not
392 # applicable.
392 # applicable.
393 pass
393 pass
394
394
395 @localrepo.unfilteredpropertycache
395 @localrepo.unfilteredpropertycache
396 def manifestlog(self):
396 def manifestlog(self):
397 self._cgunpacker.seek(self.manstart)
397 self._cgunpacker.seek(self.manstart)
398 # consume the header if it exists
398 # consume the header if it exists
399 self._cgunpacker.manifestheader()
399 self._cgunpacker.manifestheader()
400 linkmapper = self.unfiltered().changelog.rev
400 linkmapper = self.unfiltered().changelog.rev
401 rootstore = bundlemanifest(
401 rootstore = bundlemanifest(
402 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
402 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
403 )
403 )
404 self.filestart = self._cgunpacker.tell()
404 self.filestart = self._cgunpacker.tell()
405
405
406 return manifest.manifestlog(
406 return manifest.manifestlog(
407 self.svfs, self, rootstore, self.narrowmatch()
407 self.svfs, self, rootstore, self.narrowmatch()
408 )
408 )
409
409
410 def _consumemanifest(self):
410 def _consumemanifest(self):
411 """Consumes the manifest portion of the bundle, setting filestart so the
411 """Consumes the manifest portion of the bundle, setting filestart so the
412 file portion can be read."""
412 file portion can be read."""
413 self._cgunpacker.seek(self.manstart)
413 self._cgunpacker.seek(self.manstart)
414 self._cgunpacker.manifestheader()
414 self._cgunpacker.manifestheader()
415 for delta in self._cgunpacker.deltaiter():
415 for delta in self._cgunpacker.deltaiter():
416 pass
416 pass
417 self.filestart = self._cgunpacker.tell()
417 self.filestart = self._cgunpacker.tell()
418
418
419 @localrepo.unfilteredpropertycache
419 @localrepo.unfilteredpropertycache
420 def manstart(self):
420 def manstart(self):
421 self.changelog
421 self.changelog
422 return self.manstart
422 return self.manstart
423
423
424 @localrepo.unfilteredpropertycache
424 @localrepo.unfilteredpropertycache
425 def filestart(self):
425 def filestart(self):
426 self.manifestlog
426 self.manifestlog
427
427
428 # If filestart was not set by self.manifestlog, that means the
428 # If filestart was not set by self.manifestlog, that means the
429 # manifestlog implementation did not consume the manifests from the
429 # manifestlog implementation did not consume the manifests from the
430 # changegroup (ex: it might be consuming trees from a separate bundle2
430 # changegroup (ex: it might be consuming trees from a separate bundle2
431 # part instead). So we need to manually consume it.
431 # part instead). So we need to manually consume it.
432 if 'filestart' not in self.__dict__:
432 if 'filestart' not in self.__dict__:
433 self._consumemanifest()
433 self._consumemanifest()
434
434
435 return self.filestart
435 return self.filestart
436
436
437 def url(self):
437 def url(self):
438 return self._url
438 return self._url
439
439
440 def file(self, f):
440 def file(self, f):
441 if not self._cgfilespos:
441 if not self._cgfilespos:
442 self._cgunpacker.seek(self.filestart)
442 self._cgunpacker.seek(self.filestart)
443 self._cgfilespos = _getfilestarts(self._cgunpacker)
443 self._cgfilespos = _getfilestarts(self._cgunpacker)
444
444
445 if f in self._cgfilespos:
445 if f in self._cgfilespos:
446 self._cgunpacker.seek(self._cgfilespos[f])
446 self._cgunpacker.seek(self._cgfilespos[f])
447 linkmapper = self.unfiltered().changelog.rev
447 linkmapper = self.unfiltered().changelog.rev
448 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
448 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
449 else:
449 else:
450 return super(bundlerepository, self).file(f)
450 return super(bundlerepository, self).file(f)
451
451
452 def close(self):
452 def close(self):
453 """Close assigned bundle file immediately."""
453 """Close assigned bundle file immediately."""
454 self._bundlefile.close()
454 self._bundlefile.close()
455 if self.tempfile is not None:
455 if self.tempfile is not None:
456 self.vfs.unlink(self.tempfile)
456 self.vfs.unlink(self.tempfile)
457 if self._tempparent:
457 if self._tempparent:
458 shutil.rmtree(self._tempparent, True)
458 shutil.rmtree(self._tempparent, True)
459
459
460 def cancopy(self):
460 def cancopy(self):
461 return False
461 return False
462
462
463 def peer(self):
463 def peer(self):
464 return bundlepeer(self)
464 return bundlepeer(self)
465
465
466 def getcwd(self):
466 def getcwd(self):
467 return encoding.getcwd() # always outside the repo
467 return encoding.getcwd() # always outside the repo
468
468
469 # Check if parents exist in localrepo before setting
469 # Check if parents exist in localrepo before setting
470 def setparents(self, p1, p2=None):
470 def setparents(self, p1, p2=None):
471 if p2 is None:
471 if p2 is None:
472 p2 = self.nullid
472 p2 = self.nullid
473 p1rev = self.changelog.rev(p1)
473 p1rev = self.changelog.rev(p1)
474 p2rev = self.changelog.rev(p2)
474 p2rev = self.changelog.rev(p2)
475 msg = _(b"setting parent to node %s that only exists in the bundle\n")
475 msg = _(b"setting parent to node %s that only exists in the bundle\n")
476 if self.changelog.repotiprev < p1rev:
476 if self.changelog.repotiprev < p1rev:
477 self.ui.warn(msg % hex(p1))
477 self.ui.warn(msg % hex(p1))
478 if self.changelog.repotiprev < p2rev:
478 if self.changelog.repotiprev < p2rev:
479 self.ui.warn(msg % hex(p2))
479 self.ui.warn(msg % hex(p2))
480 return super(bundlerepository, self).setparents(p1, p2)
480 return super(bundlerepository, self).setparents(p1, p2)
481
481
482
482
483 def instance(ui, path, create, intents=None, createopts=None):
483 def instance(ui, path, create, intents=None, createopts=None):
484 if create:
484 if create:
485 raise error.Abort(_(b'cannot create new bundle repository'))
485 raise error.Abort(_(b'cannot create new bundle repository'))
486 # internal config: bundle.mainreporoot
486 # internal config: bundle.mainreporoot
487 parentpath = ui.config(b"bundle", b"mainreporoot")
487 parentpath = ui.config(b"bundle", b"mainreporoot")
488 if not parentpath:
488 if not parentpath:
489 # try to find the correct path to the working directory repo
489 # try to find the correct path to the working directory repo
490 parentpath = cmdutil.findrepo(encoding.getcwd())
490 parentpath = cmdutil.findrepo(encoding.getcwd())
491 if parentpath is None:
491 if parentpath is None:
492 parentpath = b''
492 parentpath = b''
493 if parentpath:
493 if parentpath:
494 # Try to make the full path relative so we get a nice, short URL.
494 # Try to make the full path relative so we get a nice, short URL.
495 # In particular, we don't want temp dir names in test outputs.
495 # In particular, we don't want temp dir names in test outputs.
496 cwd = encoding.getcwd()
496 cwd = encoding.getcwd()
497 if parentpath == cwd:
497 if parentpath == cwd:
498 parentpath = b''
498 parentpath = b''
499 else:
499 else:
500 cwd = pathutil.normasprefix(cwd)
500 cwd = pathutil.normasprefix(cwd)
501 if parentpath.startswith(cwd):
501 if parentpath.startswith(cwd):
502 parentpath = parentpath[len(cwd) :]
502 parentpath = parentpath[len(cwd) :]
503 u = urlutil.url(path)
503 u = urlutil.url(path)
504 path = u.localpath()
504 path = u.localpath()
505 if u.scheme == b'bundle':
505 if u.scheme == b'bundle':
506 s = path.split(b"+", 1)
506 s = path.split(b"+", 1)
507 if len(s) == 1:
507 if len(s) == 1:
508 repopath, bundlename = parentpath, s[0]
508 repopath, bundlename = parentpath, s[0]
509 else:
509 else:
510 repopath, bundlename = s
510 repopath, bundlename = s
511 else:
511 else:
512 repopath, bundlename = parentpath, path
512 repopath, bundlename = parentpath, path
513
513
514 return makebundlerepository(ui, repopath, bundlename)
514 return makebundlerepository(ui, repopath, bundlename)
515
515
516
516
517 def makebundlerepository(ui, repopath, bundlepath):
517 def makebundlerepository(ui, repopath, bundlepath):
518 """Make a bundle repository object based on repo and bundle paths."""
518 """Make a bundle repository object based on repo and bundle paths."""
519 if repopath:
519 if repopath:
520 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
520 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
521 else:
521 else:
522 url = b'bundle:%s' % bundlepath
522 url = b'bundle:%s' % bundlepath
523
523
524 # Because we can't make any guarantees about the type of the base
524 # Because we can't make any guarantees about the type of the base
525 # repository, we can't have a static class representing the bundle
525 # repository, we can't have a static class representing the bundle
526 # repository. We also can't make any guarantees about how to even
526 # repository. We also can't make any guarantees about how to even
527 # call the base repository's constructor!
527 # call the base repository's constructor!
528 #
528 #
529 # So, our strategy is to go through ``localrepo.instance()`` to construct
529 # So, our strategy is to go through ``localrepo.instance()`` to construct
530 # a repo instance. Then, we dynamically create a new type derived from
530 # a repo instance. Then, we dynamically create a new type derived from
531 # both it and our ``bundlerepository`` class which overrides some
531 # both it and our ``bundlerepository`` class which overrides some
532 # functionality. We then change the type of the constructed repository
532 # functionality. We then change the type of the constructed repository
533 # to this new type and initialize the bundle-specific bits of it.
533 # to this new type and initialize the bundle-specific bits of it.
534
534
535 try:
535 try:
536 repo = localrepo.instance(ui, repopath, create=False)
536 repo = localrepo.instance(ui, repopath, create=False)
537 tempparent = None
537 tempparent = None
538 except error.RepoError:
538 except error.RepoError:
539 tempparent = pycompat.mkdtemp()
539 tempparent = pycompat.mkdtemp()
540 try:
540 try:
541 repo = localrepo.instance(ui, tempparent, create=True)
541 repo = localrepo.instance(ui, tempparent, create=True)
542 except Exception:
542 except Exception:
543 shutil.rmtree(tempparent)
543 shutil.rmtree(tempparent)
544 raise
544 raise
545
545
546 class derivedbundlerepository(bundlerepository, repo.__class__):
546 class derivedbundlerepository(bundlerepository, repo.__class__):
547 pass
547 pass
548
548
549 repo.__class__ = derivedbundlerepository
549 repo.__class__ = derivedbundlerepository
550 bundlerepository.__init__(repo, bundlepath, url, tempparent)
550 bundlerepository.__init__(repo, bundlepath, url, tempparent)
551
551
552 return repo
552 return repo
553
553
554
554
555 class bundletransactionmanager(object):
555 class bundletransactionmanager(object):
556 def transaction(self):
556 def transaction(self):
557 return None
557 return None
558
558
559 def close(self):
559 def close(self):
560 raise NotImplementedError
560 raise NotImplementedError
561
561
562 def release(self):
562 def release(self):
563 raise NotImplementedError
563 raise NotImplementedError
564
564
565
565
566 def getremotechanges(
566 def getremotechanges(
567 ui, repo, peer, onlyheads=None, bundlename=None, force=False
567 ui, repo, peer, onlyheads=None, bundlename=None, force=False
568 ):
568 ):
569 """obtains a bundle of changes incoming from peer
569 """obtains a bundle of changes incoming from peer
570
570
571 "onlyheads" restricts the returned changes to those reachable from the
571 "onlyheads" restricts the returned changes to those reachable from the
572 specified heads.
572 specified heads.
573 "bundlename", if given, stores the bundle to this file path permanently;
573 "bundlename", if given, stores the bundle to this file path permanently;
574 otherwise it's stored to a temp file and gets deleted again when you call
574 otherwise it's stored to a temp file and gets deleted again when you call
575 the returned "cleanupfn".
575 the returned "cleanupfn".
576 "force" indicates whether to proceed on unrelated repos.
576 "force" indicates whether to proceed on unrelated repos.
577
577
578 Returns a tuple (local, csets, cleanupfn):
578 Returns a tuple (local, csets, cleanupfn):
579
579
580 "local" is a local repo from which to obtain the actual incoming
580 "local" is a local repo from which to obtain the actual incoming
581 changesets; it is a bundlerepo for the obtained bundle when the
581 changesets; it is a bundlerepo for the obtained bundle when the
582 original "peer" is remote.
582 original "peer" is remote.
583 "csets" lists the incoming changeset node ids.
583 "csets" lists the incoming changeset node ids.
584 "cleanupfn" must be called without arguments when you're done processing
584 "cleanupfn" must be called without arguments when you're done processing
585 the changes; it closes both the original "peer" and the one returned
585 the changes; it closes both the original "peer" and the one returned
586 here.
586 here.
587 """
587 """
588 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
588 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
589 common, incoming, rheads = tmp
589 common, incoming, rheads = tmp
590 if not incoming:
590 if not incoming:
591 try:
591 try:
592 if bundlename:
592 if bundlename:
593 os.unlink(bundlename)
593 os.unlink(bundlename)
594 except OSError:
594 except OSError:
595 pass
595 pass
596 return repo, [], peer.close
596 return repo, [], peer.close
597
597
598 commonset = set(common)
598 commonset = set(common)
599 rheads = [x for x in rheads if x not in commonset]
599 rheads = [x for x in rheads if x not in commonset]
600
600
601 bundle = None
601 bundle = None
602 bundlerepo = None
602 bundlerepo = None
603 localrepo = peer.local()
603 localrepo = peer.local()
604 if bundlename or not localrepo:
604 if bundlename or not localrepo:
605 # create a bundle (uncompressed if peer repo is not local)
605 # create a bundle (uncompressed if peer repo is not local)
606
606
607 # developer config: devel.legacy.exchange
607 # developer config: devel.legacy.exchange
608 legexc = ui.configlist(b'devel', b'legacy.exchange')
608 legexc = ui.configlist(b'devel', b'legacy.exchange')
609 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
609 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
610 canbundle2 = (
610 canbundle2 = (
611 not forcebundle1
611 not forcebundle1
612 and peer.capable(b'getbundle')
612 and peer.capable(b'getbundle')
613 and peer.capable(b'bundle2')
613 and peer.capable(b'bundle2')
614 )
614 )
615 if canbundle2:
615 if canbundle2:
616 with peer.commandexecutor() as e:
616 with peer.commandexecutor() as e:
617 b2 = e.callcommand(
617 b2 = e.callcommand(
618 b'getbundle',
618 b'getbundle',
619 {
619 {
620 b'source': b'incoming',
620 b'source': b'incoming',
621 b'common': common,
621 b'common': common,
622 b'heads': rheads,
622 b'heads': rheads,
623 b'bundlecaps': exchange.caps20to10(
623 b'bundlecaps': exchange.caps20to10(
624 repo, role=b'client'
624 repo, role=b'client'
625 ),
625 ),
626 b'cg': True,
626 b'cg': True,
627 },
627 },
628 ).result()
628 ).result()
629
629
630 fname = bundle = changegroup.writechunks(
630 fname = bundle = changegroup.writechunks(
631 ui, b2._forwardchunks(), bundlename
631 ui, b2._forwardchunks(), bundlename
632 )
632 )
633 else:
633 else:
634 if peer.capable(b'getbundle'):
634 if peer.capable(b'getbundle'):
635 with peer.commandexecutor() as e:
635 with peer.commandexecutor() as e:
636 cg = e.callcommand(
636 cg = e.callcommand(
637 b'getbundle',
637 b'getbundle',
638 {
638 {
639 b'source': b'incoming',
639 b'source': b'incoming',
640 b'common': common,
640 b'common': common,
641 b'heads': rheads,
641 b'heads': rheads,
642 },
642 },
643 ).result()
643 ).result()
644 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
644 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
645 # compat with older servers when pulling all remote heads
645 # compat with older servers when pulling all remote heads
646
646
647 with peer.commandexecutor() as e:
647 with peer.commandexecutor() as e:
648 cg = e.callcommand(
648 cg = e.callcommand(
649 b'changegroup',
649 b'changegroup',
650 {
650 {
651 b'nodes': incoming,
651 b'nodes': incoming,
652 b'source': b'incoming',
652 b'source': b'incoming',
653 },
653 },
654 ).result()
654 ).result()
655
655
656 rheads = None
656 rheads = None
657 else:
657 else:
658 with peer.commandexecutor() as e:
658 with peer.commandexecutor() as e:
659 cg = e.callcommand(
659 cg = e.callcommand(
660 b'changegroupsubset',
660 b'changegroupsubset',
661 {
661 {
662 b'bases': incoming,
662 b'bases': incoming,
663 b'heads': rheads,
663 b'heads': rheads,
664 b'source': b'incoming',
664 b'source': b'incoming',
665 },
665 },
666 ).result()
666 ).result()
667
667
668 if localrepo:
668 if localrepo:
669 bundletype = b"HG10BZ"
669 bundletype = b"HG10BZ"
670 else:
670 else:
671 bundletype = b"HG10UN"
671 bundletype = b"HG10UN"
672 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
672 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
673 # keep written bundle?
673 # keep written bundle?
674 if bundlename:
674 if bundlename:
675 bundle = None
675 bundle = None
676 if not localrepo:
676 if not localrepo:
677 # use the created uncompressed bundlerepo
677 # use the created uncompressed bundlerepo
678 localrepo = bundlerepo = makebundlerepository(
678 localrepo = bundlerepo = makebundlerepository(
679 repo.baseui, repo.root, fname
679 repo.baseui, repo.root, fname
680 )
680 )
681
681
682 # this repo contains local and peer now, so filter out local again
682 # this repo contains local and peer now, so filter out local again
683 common = repo.heads()
683 common = repo.heads()
684 if localrepo:
684 if localrepo:
685 # Part of common may be remotely filtered
685 # Part of common may be remotely filtered
686 # So use an unfiltered version
686 # So use an unfiltered version
687 # The discovery process probably need cleanup to avoid that
687 # The discovery process probably need cleanup to avoid that
688 localrepo = localrepo.unfiltered()
688 localrepo = localrepo.unfiltered()
689
689
690 csets = localrepo.changelog.findmissing(common, rheads)
690 csets = localrepo.changelog.findmissing(common, rheads)
691
691
692 if bundlerepo:
692 if bundlerepo:
693 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
693 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
694
694
695 with peer.commandexecutor() as e:
695 with peer.commandexecutor() as e:
696 remotephases = e.callcommand(
696 remotephases = e.callcommand(
697 b'listkeys',
697 b'listkeys',
698 {
698 {
699 b'namespace': b'phases',
699 b'namespace': b'phases',
700 },
700 },
701 ).result()
701 ).result()
702
702
703 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
703 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
704 pullop.trmanager = bundletransactionmanager()
704 pullop.trmanager = bundletransactionmanager()
705 exchange._pullapplyphases(pullop, remotephases)
705 exchange._pullapplyphases(pullop, remotephases)
706
706
707 def cleanup():
707 def cleanup():
708 if bundlerepo:
708 if bundlerepo:
709 bundlerepo.close()
709 bundlerepo.close()
710 if bundle:
710 if bundle:
711 os.unlink(bundle)
711 os.unlink(bundle)
712 peer.close()
712 peer.close()
713
713
714 return (localrepo, csets, cleanup)
714 return (localrepo, csets, cleanup)
@@ -1,628 +1,625 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 )
14 )
15 from .thirdparty import attr
15 from .thirdparty import attr
16
16
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 metadata,
20 metadata,
21 pycompat,
21 pycompat,
22 revlog,
22 revlog,
23 )
23 )
24 from .utils import (
24 from .utils import (
25 dateutil,
25 dateutil,
26 stringutil,
26 stringutil,
27 )
27 )
28 from .revlogutils import (
28 from .revlogutils import (
29 constants as revlog_constants,
29 constants as revlog_constants,
30 flagutil,
30 flagutil,
31 )
31 )
32
32
33 _defaultextra = {b'branch': b'default'}
33 _defaultextra = {b'branch': b'default'}
34
34
35
35
36 def _string_escape(text):
36 def _string_escape(text):
37 """
37 """
38 >>> from .pycompat import bytechr as chr
38 >>> from .pycompat import bytechr as chr
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
41 >>> s
41 >>> s
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
43 >>> res = _string_escape(s)
43 >>> res = _string_escape(s)
44 >>> s == _string_unescape(res)
44 >>> s == _string_unescape(res)
45 True
45 True
46 """
46 """
47 # subset of the string_escape codec
47 # subset of the string_escape codec
48 text = (
48 text = (
49 text.replace(b'\\', b'\\\\')
49 text.replace(b'\\', b'\\\\')
50 .replace(b'\n', b'\\n')
50 .replace(b'\n', b'\\n')
51 .replace(b'\r', b'\\r')
51 .replace(b'\r', b'\\r')
52 )
52 )
53 return text.replace(b'\0', b'\\0')
53 return text.replace(b'\0', b'\\0')
54
54
55
55
56 def _string_unescape(text):
56 def _string_unescape(text):
57 if b'\\0' in text:
57 if b'\\0' in text:
58 # fix up \0 without getting into trouble with \\0
58 # fix up \0 without getting into trouble with \\0
59 text = text.replace(b'\\\\', b'\\\\\n')
59 text = text.replace(b'\\\\', b'\\\\\n')
60 text = text.replace(b'\\0', b'\0')
60 text = text.replace(b'\\0', b'\0')
61 text = text.replace(b'\n', b'')
61 text = text.replace(b'\n', b'')
62 return stringutil.unescapestr(text)
62 return stringutil.unescapestr(text)
63
63
64
64
65 def decodeextra(text):
65 def decodeextra(text):
66 """
66 """
67 >>> from .pycompat import bytechr as chr
67 >>> from .pycompat import bytechr as chr
68 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
68 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
69 ... ).items())
69 ... ).items())
70 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
70 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
71 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
71 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
72 ... b'baz': chr(92) + chr(0) + b'2'})
72 ... b'baz': chr(92) + chr(0) + b'2'})
73 ... ).items())
73 ... ).items())
74 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
74 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
75 """
75 """
76 extra = _defaultextra.copy()
76 extra = _defaultextra.copy()
77 for l in text.split(b'\0'):
77 for l in text.split(b'\0'):
78 if l:
78 if l:
79 k, v = _string_unescape(l).split(b':', 1)
79 k, v = _string_unescape(l).split(b':', 1)
80 extra[k] = v
80 extra[k] = v
81 return extra
81 return extra
82
82
83
83
84 def encodeextra(d):
84 def encodeextra(d):
85 # keys must be sorted to produce a deterministic changelog entry
85 # keys must be sorted to produce a deterministic changelog entry
86 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
86 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
87 return b"\0".join(items)
87 return b"\0".join(items)
88
88
89
89
90 def stripdesc(desc):
90 def stripdesc(desc):
91 """strip trailing whitespace and leading and trailing empty lines"""
91 """strip trailing whitespace and leading and trailing empty lines"""
92 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
92 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
93
93
94
94
95 class appender(object):
95 class appender(object):
96 """the changelog index must be updated last on disk, so we use this class
96 """the changelog index must be updated last on disk, so we use this class
97 to delay writes to it"""
97 to delay writes to it"""
98
98
99 def __init__(self, vfs, name, mode, buf):
99 def __init__(self, vfs, name, mode, buf):
100 self.data = buf
100 self.data = buf
101 fp = vfs(name, mode)
101 fp = vfs(name, mode)
102 self.fp = fp
102 self.fp = fp
103 self.offset = fp.tell()
103 self.offset = fp.tell()
104 self.size = vfs.fstat(fp).st_size
104 self.size = vfs.fstat(fp).st_size
105 self._end = self.size
105 self._end = self.size
106
106
107 def end(self):
107 def end(self):
108 return self._end
108 return self._end
109
109
110 def tell(self):
110 def tell(self):
111 return self.offset
111 return self.offset
112
112
113 def flush(self):
113 def flush(self):
114 pass
114 pass
115
115
116 @property
116 @property
117 def closed(self):
117 def closed(self):
118 return self.fp.closed
118 return self.fp.closed
119
119
120 def close(self):
120 def close(self):
121 self.fp.close()
121 self.fp.close()
122
122
123 def seek(self, offset, whence=0):
123 def seek(self, offset, whence=0):
124 '''virtual file offset spans real file and data'''
124 '''virtual file offset spans real file and data'''
125 if whence == 0:
125 if whence == 0:
126 self.offset = offset
126 self.offset = offset
127 elif whence == 1:
127 elif whence == 1:
128 self.offset += offset
128 self.offset += offset
129 elif whence == 2:
129 elif whence == 2:
130 self.offset = self.end() + offset
130 self.offset = self.end() + offset
131 if self.offset < self.size:
131 if self.offset < self.size:
132 self.fp.seek(self.offset)
132 self.fp.seek(self.offset)
133
133
134 def read(self, count=-1):
134 def read(self, count=-1):
135 '''only trick here is reads that span real file and data'''
135 '''only trick here is reads that span real file and data'''
136 ret = b""
136 ret = b""
137 if self.offset < self.size:
137 if self.offset < self.size:
138 s = self.fp.read(count)
138 s = self.fp.read(count)
139 ret = s
139 ret = s
140 self.offset += len(s)
140 self.offset += len(s)
141 if count > 0:
141 if count > 0:
142 count -= len(s)
142 count -= len(s)
143 if count != 0:
143 if count != 0:
144 doff = self.offset - self.size
144 doff = self.offset - self.size
145 self.data.insert(0, b"".join(self.data))
145 self.data.insert(0, b"".join(self.data))
146 del self.data[1:]
146 del self.data[1:]
147 s = self.data[0][doff : doff + count]
147 s = self.data[0][doff : doff + count]
148 self.offset += len(s)
148 self.offset += len(s)
149 ret += s
149 ret += s
150 return ret
150 return ret
151
151
152 def write(self, s):
152 def write(self, s):
153 self.data.append(bytes(s))
153 self.data.append(bytes(s))
154 self.offset += len(s)
154 self.offset += len(s)
155 self._end += len(s)
155 self._end += len(s)
156
156
157 def __enter__(self):
157 def __enter__(self):
158 self.fp.__enter__()
158 self.fp.__enter__()
159 return self
159 return self
160
160
161 def __exit__(self, *args):
161 def __exit__(self, *args):
162 return self.fp.__exit__(*args)
162 return self.fp.__exit__(*args)
163
163
164
164
165 class _divertopener(object):
165 class _divertopener(object):
166 def __init__(self, opener, target):
166 def __init__(self, opener, target):
167 self._opener = opener
167 self._opener = opener
168 self._target = target
168 self._target = target
169
169
170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
171 if name != self._target:
171 if name != self._target:
172 return self._opener(name, mode, **kwargs)
172 return self._opener(name, mode, **kwargs)
173 return self._opener(name + b".a", mode, **kwargs)
173 return self._opener(name + b".a", mode, **kwargs)
174
174
175 def __getattr__(self, attr):
175 def __getattr__(self, attr):
176 return getattr(self._opener, attr)
176 return getattr(self._opener, attr)
177
177
178
178
179 def _delayopener(opener, target, buf):
179 def _delayopener(opener, target, buf):
180 """build an opener that stores chunks in 'buf' instead of 'target'"""
180 """build an opener that stores chunks in 'buf' instead of 'target'"""
181
181
182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
183 if name != target:
183 if name != target:
184 return opener(name, mode, **kwargs)
184 return opener(name, mode, **kwargs)
185 assert not kwargs
185 assert not kwargs
186 return appender(opener, name, mode, buf)
186 return appender(opener, name, mode, buf)
187
187
188 return _delay
188 return _delay
189
189
190
190
191 @attr.s
191 @attr.s
192 class _changelogrevision(object):
192 class _changelogrevision(object):
193 # Extensions might modify _defaultextra, so let the constructor below pass
193 # Extensions might modify _defaultextra, so let the constructor below pass
194 # it in
194 # it in
195 extra = attr.ib()
195 extra = attr.ib()
196 manifest = attr.ib()
196 manifest = attr.ib()
197 user = attr.ib(default=b'')
197 user = attr.ib(default=b'')
198 date = attr.ib(default=(0, 0))
198 date = attr.ib(default=(0, 0))
199 files = attr.ib(default=attr.Factory(list))
199 files = attr.ib(default=attr.Factory(list))
200 filesadded = attr.ib(default=None)
200 filesadded = attr.ib(default=None)
201 filesremoved = attr.ib(default=None)
201 filesremoved = attr.ib(default=None)
202 p1copies = attr.ib(default=None)
202 p1copies = attr.ib(default=None)
203 p2copies = attr.ib(default=None)
203 p2copies = attr.ib(default=None)
204 description = attr.ib(default=b'')
204 description = attr.ib(default=b'')
205 branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
205 branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
206
206
207
207
208 class changelogrevision(object):
208 class changelogrevision(object):
209 """Holds results of a parsed changelog revision.
209 """Holds results of a parsed changelog revision.
210
210
211 Changelog revisions consist of multiple pieces of data, including
211 Changelog revisions consist of multiple pieces of data, including
212 the manifest node, user, and date. This object exposes a view into
212 the manifest node, user, and date. This object exposes a view into
213 the parsed object.
213 the parsed object.
214 """
214 """
215
215
216 __slots__ = (
216 __slots__ = (
217 '_offsets',
217 '_offsets',
218 '_text',
218 '_text',
219 '_sidedata',
219 '_sidedata',
220 '_cpsd',
220 '_cpsd',
221 '_changes',
221 '_changes',
222 )
222 )
223
223
224 def __new__(cls, cl, text, sidedata, cpsd):
224 def __new__(cls, cl, text, sidedata, cpsd):
225 if not text:
225 if not text:
226 return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
226 return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
227
227
228 self = super(changelogrevision, cls).__new__(cls)
228 self = super(changelogrevision, cls).__new__(cls)
229 # We could return here and implement the following as an __init__.
229 # We could return here and implement the following as an __init__.
230 # But doing it here is equivalent and saves an extra function call.
230 # But doing it here is equivalent and saves an extra function call.
231
231
232 # format used:
232 # format used:
233 # nodeid\n : manifest node in ascii
233 # nodeid\n : manifest node in ascii
234 # user\n : user, no \n or \r allowed
234 # user\n : user, no \n or \r allowed
235 # time tz extra\n : date (time is int or float, timezone is int)
235 # time tz extra\n : date (time is int or float, timezone is int)
236 # : extra is metadata, encoded and separated by '\0'
236 # : extra is metadata, encoded and separated by '\0'
237 # : older versions ignore it
237 # : older versions ignore it
238 # files\n\n : files modified by the cset, no \n or \r allowed
238 # files\n\n : files modified by the cset, no \n or \r allowed
239 # (.*) : comment (free text, ideally utf-8)
239 # (.*) : comment (free text, ideally utf-8)
240 #
240 #
241 # changelog v0 doesn't use extra
241 # changelog v0 doesn't use extra
242
242
243 nl1 = text.index(b'\n')
243 nl1 = text.index(b'\n')
244 nl2 = text.index(b'\n', nl1 + 1)
244 nl2 = text.index(b'\n', nl1 + 1)
245 nl3 = text.index(b'\n', nl2 + 1)
245 nl3 = text.index(b'\n', nl2 + 1)
246
246
247 # The list of files may be empty. Which means nl3 is the first of the
247 # The list of files may be empty. Which means nl3 is the first of the
248 # double newline that precedes the description.
248 # double newline that precedes the description.
249 if text[nl3 + 1 : nl3 + 2] == b'\n':
249 if text[nl3 + 1 : nl3 + 2] == b'\n':
250 doublenl = nl3
250 doublenl = nl3
251 else:
251 else:
252 doublenl = text.index(b'\n\n', nl3 + 1)
252 doublenl = text.index(b'\n\n', nl3 + 1)
253
253
254 self._offsets = (nl1, nl2, nl3, doublenl)
254 self._offsets = (nl1, nl2, nl3, doublenl)
255 self._text = text
255 self._text = text
256 self._sidedata = sidedata
256 self._sidedata = sidedata
257 self._cpsd = cpsd
257 self._cpsd = cpsd
258 self._changes = None
258 self._changes = None
259
259
260 return self
260 return self
261
261
262 @property
262 @property
263 def manifest(self):
263 def manifest(self):
264 return bin(self._text[0 : self._offsets[0]])
264 return bin(self._text[0 : self._offsets[0]])
265
265
266 @property
266 @property
267 def user(self):
267 def user(self):
268 off = self._offsets
268 off = self._offsets
269 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
269 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
270
270
271 @property
271 @property
272 def _rawdate(self):
272 def _rawdate(self):
273 off = self._offsets
273 off = self._offsets
274 dateextra = self._text[off[1] + 1 : off[2]]
274 dateextra = self._text[off[1] + 1 : off[2]]
275 return dateextra.split(b' ', 2)[0:2]
275 return dateextra.split(b' ', 2)[0:2]
276
276
277 @property
277 @property
278 def _rawextra(self):
278 def _rawextra(self):
279 off = self._offsets
279 off = self._offsets
280 dateextra = self._text[off[1] + 1 : off[2]]
280 dateextra = self._text[off[1] + 1 : off[2]]
281 fields = dateextra.split(b' ', 2)
281 fields = dateextra.split(b' ', 2)
282 if len(fields) != 3:
282 if len(fields) != 3:
283 return None
283 return None
284
284
285 return fields[2]
285 return fields[2]
286
286
287 @property
287 @property
288 def date(self):
288 def date(self):
289 raw = self._rawdate
289 raw = self._rawdate
290 time = float(raw[0])
290 time = float(raw[0])
291 # Various tools did silly things with the timezone.
291 # Various tools did silly things with the timezone.
292 try:
292 try:
293 timezone = int(raw[1])
293 timezone = int(raw[1])
294 except ValueError:
294 except ValueError:
295 timezone = 0
295 timezone = 0
296
296
297 return time, timezone
297 return time, timezone
298
298
299 @property
299 @property
300 def extra(self):
300 def extra(self):
301 raw = self._rawextra
301 raw = self._rawextra
302 if raw is None:
302 if raw is None:
303 return _defaultextra
303 return _defaultextra
304
304
305 return decodeextra(raw)
305 return decodeextra(raw)
306
306
307 @property
307 @property
308 def changes(self):
308 def changes(self):
309 if self._changes is not None:
309 if self._changes is not None:
310 return self._changes
310 return self._changes
311 if self._cpsd:
311 if self._cpsd:
312 changes = metadata.decode_files_sidedata(self._sidedata)
312 changes = metadata.decode_files_sidedata(self._sidedata)
313 else:
313 else:
314 changes = metadata.ChangingFiles(
314 changes = metadata.ChangingFiles(
315 touched=self.files or (),
315 touched=self.files or (),
316 added=self.filesadded or (),
316 added=self.filesadded or (),
317 removed=self.filesremoved or (),
317 removed=self.filesremoved or (),
318 p1_copies=self.p1copies or {},
318 p1_copies=self.p1copies or {},
319 p2_copies=self.p2copies or {},
319 p2_copies=self.p2copies or {},
320 )
320 )
321 self._changes = changes
321 self._changes = changes
322 return changes
322 return changes
323
323
324 @property
324 @property
325 def files(self):
325 def files(self):
326 if self._cpsd:
326 if self._cpsd:
327 return sorted(self.changes.touched)
327 return sorted(self.changes.touched)
328 off = self._offsets
328 off = self._offsets
329 if off[2] == off[3]:
329 if off[2] == off[3]:
330 return []
330 return []
331
331
332 return self._text[off[2] + 1 : off[3]].split(b'\n')
332 return self._text[off[2] + 1 : off[3]].split(b'\n')
333
333
334 @property
334 @property
335 def filesadded(self):
335 def filesadded(self):
336 if self._cpsd:
336 if self._cpsd:
337 return self.changes.added
337 return self.changes.added
338 else:
338 else:
339 rawindices = self.extra.get(b'filesadded')
339 rawindices = self.extra.get(b'filesadded')
340 if rawindices is None:
340 if rawindices is None:
341 return None
341 return None
342 return metadata.decodefileindices(self.files, rawindices)
342 return metadata.decodefileindices(self.files, rawindices)
343
343
344 @property
344 @property
345 def filesremoved(self):
345 def filesremoved(self):
346 if self._cpsd:
346 if self._cpsd:
347 return self.changes.removed
347 return self.changes.removed
348 else:
348 else:
349 rawindices = self.extra.get(b'filesremoved')
349 rawindices = self.extra.get(b'filesremoved')
350 if rawindices is None:
350 if rawindices is None:
351 return None
351 return None
352 return metadata.decodefileindices(self.files, rawindices)
352 return metadata.decodefileindices(self.files, rawindices)
353
353
354 @property
354 @property
355 def p1copies(self):
355 def p1copies(self):
356 if self._cpsd:
356 if self._cpsd:
357 return self.changes.copied_from_p1
357 return self.changes.copied_from_p1
358 else:
358 else:
359 rawcopies = self.extra.get(b'p1copies')
359 rawcopies = self.extra.get(b'p1copies')
360 if rawcopies is None:
360 if rawcopies is None:
361 return None
361 return None
362 return metadata.decodecopies(self.files, rawcopies)
362 return metadata.decodecopies(self.files, rawcopies)
363
363
364 @property
364 @property
365 def p2copies(self):
365 def p2copies(self):
366 if self._cpsd:
366 if self._cpsd:
367 return self.changes.copied_from_p2
367 return self.changes.copied_from_p2
368 else:
368 else:
369 rawcopies = self.extra.get(b'p2copies')
369 rawcopies = self.extra.get(b'p2copies')
370 if rawcopies is None:
370 if rawcopies is None:
371 return None
371 return None
372 return metadata.decodecopies(self.files, rawcopies)
372 return metadata.decodecopies(self.files, rawcopies)
373
373
374 @property
374 @property
375 def description(self):
375 def description(self):
376 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
376 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
377
377
378 @property
378 @property
379 def branchinfo(self):
379 def branchinfo(self):
380 extra = self.extra
380 extra = self.extra
381 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
381 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
382
382
383
383
384 class changelog(revlog.revlog):
384 class changelog(revlog.revlog):
385 def __init__(self, opener, trypending=False, concurrencychecker=None):
385 def __init__(self, opener, trypending=False, concurrencychecker=None):
386 """Load a changelog revlog using an opener.
386 """Load a changelog revlog using an opener.
387
387
388 If ``trypending`` is true, we attempt to load the index from a
388 If ``trypending`` is true, we attempt to load the index from a
389 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
389 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
390 The ``00changelog.i.a`` file contains index (and possibly inline
390 The ``00changelog.i.a`` file contains index (and possibly inline
391 revision) data for a transaction that hasn't been finalized yet.
391 revision) data for a transaction that hasn't been finalized yet.
392 It exists in a separate file to facilitate readers (such as
392 It exists in a separate file to facilitate readers (such as
393 hooks processes) accessing data before a transaction is finalized.
393 hooks processes) accessing data before a transaction is finalized.
394
394
395 ``concurrencychecker`` will be passed to the revlog init function, see
395 ``concurrencychecker`` will be passed to the revlog init function, see
396 the documentation there.
396 the documentation there.
397 """
397 """
398
398
399 indexfile = b'00changelog.i'
400 if trypending and opener.exists(b'00changelog.i.a'):
399 if trypending and opener.exists(b'00changelog.i.a'):
401 postfix = b'a'
400 postfix = b'a'
402 else:
401 else:
403 postfix = None
402 postfix = None
404
403
405 datafile = b'00changelog.d'
406 revlog.revlog.__init__(
404 revlog.revlog.__init__(
407 self,
405 self,
408 opener,
406 opener,
409 target=(revlog_constants.KIND_CHANGELOG, None),
407 target=(revlog_constants.KIND_CHANGELOG, None),
408 radix=b'00changelog',
410 postfix=postfix,
409 postfix=postfix,
411 indexfile=indexfile,
412 datafile=datafile,
413 checkambig=True,
410 checkambig=True,
414 mmaplargeindex=True,
411 mmaplargeindex=True,
415 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
412 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
416 concurrencychecker=concurrencychecker,
413 concurrencychecker=concurrencychecker,
417 )
414 )
418
415
419 if self._initempty and (self._format_version == revlog.REVLOGV1):
416 if self._initempty and (self._format_version == revlog.REVLOGV1):
420 # changelogs don't benefit from generaldelta.
417 # changelogs don't benefit from generaldelta.
421
418
422 self._format_flags &= ~revlog.FLAG_GENERALDELTA
419 self._format_flags &= ~revlog.FLAG_GENERALDELTA
423 self._generaldelta = False
420 self._generaldelta = False
424
421
425 # Delta chains for changelogs tend to be very small because entries
422 # Delta chains for changelogs tend to be very small because entries
426 # tend to be small and don't delta well with each. So disable delta
423 # tend to be small and don't delta well with each. So disable delta
427 # chains.
424 # chains.
428 self._storedeltachains = False
425 self._storedeltachains = False
429
426
430 self._realopener = opener
427 self._realopener = opener
431 self._delayed = False
428 self._delayed = False
432 self._delaybuf = None
429 self._delaybuf = None
433 self._divert = False
430 self._divert = False
434 self._filteredrevs = frozenset()
431 self._filteredrevs = frozenset()
435 self._filteredrevs_hashcache = {}
432 self._filteredrevs_hashcache = {}
436 self._copiesstorage = opener.options.get(b'copies-storage')
433 self._copiesstorage = opener.options.get(b'copies-storage')
437
434
438 @property
435 @property
439 def filteredrevs(self):
436 def filteredrevs(self):
440 return self._filteredrevs
437 return self._filteredrevs
441
438
442 @filteredrevs.setter
439 @filteredrevs.setter
443 def filteredrevs(self, val):
440 def filteredrevs(self, val):
444 # Ensure all updates go through this function
441 # Ensure all updates go through this function
445 assert isinstance(val, frozenset)
442 assert isinstance(val, frozenset)
446 self._filteredrevs = val
443 self._filteredrevs = val
447 self._filteredrevs_hashcache = {}
444 self._filteredrevs_hashcache = {}
448
445
449 def delayupdate(self, tr):
446 def delayupdate(self, tr):
450 """delay visibility of index updates to other readers"""
447 """delay visibility of index updates to other readers"""
451
448
452 if not self._delayed:
449 if not self._delayed:
453 if len(self) == 0:
450 if len(self) == 0:
454 self._divert = True
451 self._divert = True
455 if self._realopener.exists(self._indexfile + b'.a'):
452 if self._realopener.exists(self._indexfile + b'.a'):
456 self._realopener.unlink(self._indexfile + b'.a')
453 self._realopener.unlink(self._indexfile + b'.a')
457 self.opener = _divertopener(self._realopener, self._indexfile)
454 self.opener = _divertopener(self._realopener, self._indexfile)
458 else:
455 else:
459 self._delaybuf = []
456 self._delaybuf = []
460 self.opener = _delayopener(
457 self.opener = _delayopener(
461 self._realopener, self._indexfile, self._delaybuf
458 self._realopener, self._indexfile, self._delaybuf
462 )
459 )
463 self._delayed = True
460 self._delayed = True
464 tr.addpending(b'cl-%i' % id(self), self._writepending)
461 tr.addpending(b'cl-%i' % id(self), self._writepending)
465 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
462 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
466
463
467 def _finalize(self, tr):
464 def _finalize(self, tr):
468 """finalize index updates"""
465 """finalize index updates"""
469 self._delayed = False
466 self._delayed = False
470 self.opener = self._realopener
467 self.opener = self._realopener
471 # move redirected index data back into place
468 # move redirected index data back into place
472 if self._divert:
469 if self._divert:
473 assert not self._delaybuf
470 assert not self._delaybuf
474 tmpname = self._indexfile + b".a"
471 tmpname = self._indexfile + b".a"
475 nfile = self.opener.open(tmpname)
472 nfile = self.opener.open(tmpname)
476 nfile.close()
473 nfile.close()
477 self.opener.rename(tmpname, self._indexfile, checkambig=True)
474 self.opener.rename(tmpname, self._indexfile, checkambig=True)
478 elif self._delaybuf:
475 elif self._delaybuf:
479 fp = self.opener(self._indexfile, b'a', checkambig=True)
476 fp = self.opener(self._indexfile, b'a', checkambig=True)
480 fp.write(b"".join(self._delaybuf))
477 fp.write(b"".join(self._delaybuf))
481 fp.close()
478 fp.close()
482 self._delaybuf = None
479 self._delaybuf = None
483 self._divert = False
480 self._divert = False
484 # split when we're done
481 # split when we're done
485 self._enforceinlinesize(tr)
482 self._enforceinlinesize(tr)
486
483
487 def _writepending(self, tr):
484 def _writepending(self, tr):
488 """create a file containing the unfinalized state for
485 """create a file containing the unfinalized state for
489 pretxnchangegroup"""
486 pretxnchangegroup"""
490 if self._delaybuf:
487 if self._delaybuf:
491 # make a temporary copy of the index
488 # make a temporary copy of the index
492 fp1 = self._realopener(self._indexfile)
489 fp1 = self._realopener(self._indexfile)
493 pendingfilename = self._indexfile + b".a"
490 pendingfilename = self._indexfile + b".a"
494 # register as a temp file to ensure cleanup on failure
491 # register as a temp file to ensure cleanup on failure
495 tr.registertmp(pendingfilename)
492 tr.registertmp(pendingfilename)
496 # write existing data
493 # write existing data
497 fp2 = self._realopener(pendingfilename, b"w")
494 fp2 = self._realopener(pendingfilename, b"w")
498 fp2.write(fp1.read())
495 fp2.write(fp1.read())
499 # add pending data
496 # add pending data
500 fp2.write(b"".join(self._delaybuf))
497 fp2.write(b"".join(self._delaybuf))
501 fp2.close()
498 fp2.close()
502 # switch modes so finalize can simply rename
499 # switch modes so finalize can simply rename
503 self._delaybuf = None
500 self._delaybuf = None
504 self._divert = True
501 self._divert = True
505 self.opener = _divertopener(self._realopener, self._indexfile)
502 self.opener = _divertopener(self._realopener, self._indexfile)
506
503
507 if self._divert:
504 if self._divert:
508 return True
505 return True
509
506
510 return False
507 return False
511
508
512 def _enforceinlinesize(self, tr, fp=None):
509 def _enforceinlinesize(self, tr, fp=None):
513 if not self._delayed:
510 if not self._delayed:
514 revlog.revlog._enforceinlinesize(self, tr, fp)
511 revlog.revlog._enforceinlinesize(self, tr, fp)
515
512
516 def read(self, nodeorrev):
513 def read(self, nodeorrev):
517 """Obtain data from a parsed changelog revision.
514 """Obtain data from a parsed changelog revision.
518
515
519 Returns a 6-tuple of:
516 Returns a 6-tuple of:
520
517
521 - manifest node in binary
518 - manifest node in binary
522 - author/user as a localstr
519 - author/user as a localstr
523 - date as a 2-tuple of (time, timezone)
520 - date as a 2-tuple of (time, timezone)
524 - list of files
521 - list of files
525 - commit message as a localstr
522 - commit message as a localstr
526 - dict of extra metadata
523 - dict of extra metadata
527
524
528 Unless you need to access all fields, consider calling
525 Unless you need to access all fields, consider calling
529 ``changelogrevision`` instead, as it is faster for partial object
526 ``changelogrevision`` instead, as it is faster for partial object
530 access.
527 access.
531 """
528 """
532 d, s = self._revisiondata(nodeorrev)
529 d, s = self._revisiondata(nodeorrev)
533 c = changelogrevision(
530 c = changelogrevision(
534 self, d, s, self._copiesstorage == b'changeset-sidedata'
531 self, d, s, self._copiesstorage == b'changeset-sidedata'
535 )
532 )
536 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
533 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
537
534
538 def changelogrevision(self, nodeorrev):
535 def changelogrevision(self, nodeorrev):
539 """Obtain a ``changelogrevision`` for a node or revision."""
536 """Obtain a ``changelogrevision`` for a node or revision."""
540 text, sidedata = self._revisiondata(nodeorrev)
537 text, sidedata = self._revisiondata(nodeorrev)
541 return changelogrevision(
538 return changelogrevision(
542 self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
539 self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
543 )
540 )
544
541
545 def readfiles(self, nodeorrev):
542 def readfiles(self, nodeorrev):
546 """
543 """
547 short version of read that only returns the files modified by the cset
544 short version of read that only returns the files modified by the cset
548 """
545 """
549 text = self.revision(nodeorrev)
546 text = self.revision(nodeorrev)
550 if not text:
547 if not text:
551 return []
548 return []
552 last = text.index(b"\n\n")
549 last = text.index(b"\n\n")
553 l = text[:last].split(b'\n')
550 l = text[:last].split(b'\n')
554 return l[3:]
551 return l[3:]
555
552
556 def add(
553 def add(
557 self,
554 self,
558 manifest,
555 manifest,
559 files,
556 files,
560 desc,
557 desc,
561 transaction,
558 transaction,
562 p1,
559 p1,
563 p2,
560 p2,
564 user,
561 user,
565 date=None,
562 date=None,
566 extra=None,
563 extra=None,
567 ):
564 ):
568 # Convert to UTF-8 encoded bytestrings as the very first
565 # Convert to UTF-8 encoded bytestrings as the very first
569 # thing: calling any method on a localstr object will turn it
566 # thing: calling any method on a localstr object will turn it
570 # into a str object and the cached UTF-8 string is thus lost.
567 # into a str object and the cached UTF-8 string is thus lost.
571 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
568 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
572
569
573 user = user.strip()
570 user = user.strip()
574 # An empty username or a username with a "\n" will make the
571 # An empty username or a username with a "\n" will make the
575 # revision text contain two "\n\n" sequences -> corrupt
572 # revision text contain two "\n\n" sequences -> corrupt
576 # repository since read cannot unpack the revision.
573 # repository since read cannot unpack the revision.
577 if not user:
574 if not user:
578 raise error.StorageError(_(b"empty username"))
575 raise error.StorageError(_(b"empty username"))
579 if b"\n" in user:
576 if b"\n" in user:
580 raise error.StorageError(
577 raise error.StorageError(
581 _(b"username %r contains a newline") % pycompat.bytestr(user)
578 _(b"username %r contains a newline") % pycompat.bytestr(user)
582 )
579 )
583
580
584 desc = stripdesc(desc)
581 desc = stripdesc(desc)
585
582
586 if date:
583 if date:
587 parseddate = b"%d %d" % dateutil.parsedate(date)
584 parseddate = b"%d %d" % dateutil.parsedate(date)
588 else:
585 else:
589 parseddate = b"%d %d" % dateutil.makedate()
586 parseddate = b"%d %d" % dateutil.makedate()
590 if extra:
587 if extra:
591 branch = extra.get(b"branch")
588 branch = extra.get(b"branch")
592 if branch in (b"default", b""):
589 if branch in (b"default", b""):
593 del extra[b"branch"]
590 del extra[b"branch"]
594 elif branch in (b".", b"null", b"tip"):
591 elif branch in (b".", b"null", b"tip"):
595 raise error.StorageError(
592 raise error.StorageError(
596 _(b'the name \'%s\' is reserved') % branch
593 _(b'the name \'%s\' is reserved') % branch
597 )
594 )
598 sortedfiles = sorted(files.touched)
595 sortedfiles = sorted(files.touched)
599 flags = 0
596 flags = 0
600 sidedata = None
597 sidedata = None
601 if self._copiesstorage == b'changeset-sidedata':
598 if self._copiesstorage == b'changeset-sidedata':
602 if files.has_copies_info:
599 if files.has_copies_info:
603 flags |= flagutil.REVIDX_HASCOPIESINFO
600 flags |= flagutil.REVIDX_HASCOPIESINFO
604 sidedata = metadata.encode_files_sidedata(files)
601 sidedata = metadata.encode_files_sidedata(files)
605
602
606 if extra:
603 if extra:
607 extra = encodeextra(extra)
604 extra = encodeextra(extra)
608 parseddate = b"%s %s" % (parseddate, extra)
605 parseddate = b"%s %s" % (parseddate, extra)
609 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
606 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
610 text = b"\n".join(l)
607 text = b"\n".join(l)
611 rev = self.addrevision(
608 rev = self.addrevision(
612 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
609 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
613 )
610 )
614 return self.node(rev)
611 return self.node(rev)
615
612
616 def branchinfo(self, rev):
613 def branchinfo(self, rev):
617 """return the branch name and open/close state of a revision
614 """return the branch name and open/close state of a revision
618
615
619 This function exists because creating a changectx object
616 This function exists because creating a changectx object
620 just to access this is costly."""
617 just to access this is costly."""
621 return self.changelogrevision(rev).branchinfo
618 return self.changelogrevision(rev).branchinfo
622
619
623 def _nodeduplicatecallback(self, transaction, rev):
620 def _nodeduplicatecallback(self, transaction, rev):
624 # keep track of revisions that got "re-added", eg: unbunde of know rev.
621 # keep track of revisions that got "re-added", eg: unbunde of know rev.
625 #
622 #
626 # We track them in a list to preserve their order from the source bundle
623 # We track them in a list to preserve their order from the source bundle
627 duplicates = transaction.changes.setdefault(b'revduplicates', [])
624 duplicates = transaction.changes.setdefault(b'revduplicates', [])
628 duplicates.append(rev)
625 duplicates.append(rev)
@@ -1,3931 +1,3931 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy as copymod
10 import copy as copymod
11 import errno
11 import errno
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import (
21 from .pycompat import (
22 getattr,
22 getattr,
23 open,
23 open,
24 setattr,
24 setattr,
25 )
25 )
26 from .thirdparty import attr
26 from .thirdparty import attr
27
27
28 from . import (
28 from . import (
29 bookmarks,
29 bookmarks,
30 changelog,
30 changelog,
31 copies,
31 copies,
32 crecord as crecordmod,
32 crecord as crecordmod,
33 dirstateguard,
33 dirstateguard,
34 encoding,
34 encoding,
35 error,
35 error,
36 formatter,
36 formatter,
37 logcmdutil,
37 logcmdutil,
38 match as matchmod,
38 match as matchmod,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 mergeutil,
41 mergeutil,
42 obsolete,
42 obsolete,
43 patch,
43 patch,
44 pathutil,
44 pathutil,
45 phases,
45 phases,
46 pycompat,
46 pycompat,
47 repair,
47 repair,
48 revlog,
48 revlog,
49 rewriteutil,
49 rewriteutil,
50 scmutil,
50 scmutil,
51 state as statemod,
51 state as statemod,
52 subrepoutil,
52 subrepoutil,
53 templatekw,
53 templatekw,
54 templater,
54 templater,
55 util,
55 util,
56 vfs as vfsmod,
56 vfs as vfsmod,
57 )
57 )
58
58
59 from .utils import (
59 from .utils import (
60 dateutil,
60 dateutil,
61 stringutil,
61 stringutil,
62 )
62 )
63
63
64 from .revlogutils import (
64 from .revlogutils import (
65 constants as revlog_constants,
65 constants as revlog_constants,
66 )
66 )
67
67
68 if pycompat.TYPE_CHECKING:
68 if pycompat.TYPE_CHECKING:
69 from typing import (
69 from typing import (
70 Any,
70 Any,
71 Dict,
71 Dict,
72 )
72 )
73
73
74 for t in (Any, Dict):
74 for t in (Any, Dict):
75 assert t
75 assert t
76
76
77 stringio = util.stringio
77 stringio = util.stringio
78
78
79 # templates of common command options
79 # templates of common command options
80
80
81 dryrunopts = [
81 dryrunopts = [
82 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
82 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
83 ]
83 ]
84
84
85 confirmopts = [
85 confirmopts = [
86 (b'', b'confirm', None, _(b'ask before applying actions')),
86 (b'', b'confirm', None, _(b'ask before applying actions')),
87 ]
87 ]
88
88
89 remoteopts = [
89 remoteopts = [
90 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
90 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
91 (
91 (
92 b'',
92 b'',
93 b'remotecmd',
93 b'remotecmd',
94 b'',
94 b'',
95 _(b'specify hg command to run on the remote side'),
95 _(b'specify hg command to run on the remote side'),
96 _(b'CMD'),
96 _(b'CMD'),
97 ),
97 ),
98 (
98 (
99 b'',
99 b'',
100 b'insecure',
100 b'insecure',
101 None,
101 None,
102 _(b'do not verify server certificate (ignoring web.cacerts config)'),
102 _(b'do not verify server certificate (ignoring web.cacerts config)'),
103 ),
103 ),
104 ]
104 ]
105
105
106 walkopts = [
106 walkopts = [
107 (
107 (
108 b'I',
108 b'I',
109 b'include',
109 b'include',
110 [],
110 [],
111 _(b'include names matching the given patterns'),
111 _(b'include names matching the given patterns'),
112 _(b'PATTERN'),
112 _(b'PATTERN'),
113 ),
113 ),
114 (
114 (
115 b'X',
115 b'X',
116 b'exclude',
116 b'exclude',
117 [],
117 [],
118 _(b'exclude names matching the given patterns'),
118 _(b'exclude names matching the given patterns'),
119 _(b'PATTERN'),
119 _(b'PATTERN'),
120 ),
120 ),
121 ]
121 ]
122
122
123 commitopts = [
123 commitopts = [
124 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
124 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
125 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
125 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
126 ]
126 ]
127
127
128 commitopts2 = [
128 commitopts2 = [
129 (
129 (
130 b'd',
130 b'd',
131 b'date',
131 b'date',
132 b'',
132 b'',
133 _(b'record the specified date as commit date'),
133 _(b'record the specified date as commit date'),
134 _(b'DATE'),
134 _(b'DATE'),
135 ),
135 ),
136 (
136 (
137 b'u',
137 b'u',
138 b'user',
138 b'user',
139 b'',
139 b'',
140 _(b'record the specified user as committer'),
140 _(b'record the specified user as committer'),
141 _(b'USER'),
141 _(b'USER'),
142 ),
142 ),
143 ]
143 ]
144
144
145 commitopts3 = [
145 commitopts3 = [
146 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
146 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
147 (b'U', b'currentuser', None, _(b'record the current user as committer')),
147 (b'U', b'currentuser', None, _(b'record the current user as committer')),
148 ]
148 ]
149
149
150 formatteropts = [
150 formatteropts = [
151 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
151 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
152 ]
152 ]
153
153
154 templateopts = [
154 templateopts = [
155 (
155 (
156 b'',
156 b'',
157 b'style',
157 b'style',
158 b'',
158 b'',
159 _(b'display using template map file (DEPRECATED)'),
159 _(b'display using template map file (DEPRECATED)'),
160 _(b'STYLE'),
160 _(b'STYLE'),
161 ),
161 ),
162 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
162 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
163 ]
163 ]
164
164
165 logopts = [
165 logopts = [
166 (b'p', b'patch', None, _(b'show patch')),
166 (b'p', b'patch', None, _(b'show patch')),
167 (b'g', b'git', None, _(b'use git extended diff format')),
167 (b'g', b'git', None, _(b'use git extended diff format')),
168 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
168 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
169 (b'M', b'no-merges', None, _(b'do not show merges')),
169 (b'M', b'no-merges', None, _(b'do not show merges')),
170 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
170 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
171 (b'G', b'graph', None, _(b"show the revision DAG")),
171 (b'G', b'graph', None, _(b"show the revision DAG")),
172 ] + templateopts
172 ] + templateopts
173
173
174 diffopts = [
174 diffopts = [
175 (b'a', b'text', None, _(b'treat all files as text')),
175 (b'a', b'text', None, _(b'treat all files as text')),
176 (
176 (
177 b'g',
177 b'g',
178 b'git',
178 b'git',
179 None,
179 None,
180 _(b'use git extended diff format (DEFAULT: diff.git)'),
180 _(b'use git extended diff format (DEFAULT: diff.git)'),
181 ),
181 ),
182 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
182 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
183 (b'', b'nodates', None, _(b'omit dates from diff headers')),
183 (b'', b'nodates', None, _(b'omit dates from diff headers')),
184 ]
184 ]
185
185
186 diffwsopts = [
186 diffwsopts = [
187 (
187 (
188 b'w',
188 b'w',
189 b'ignore-all-space',
189 b'ignore-all-space',
190 None,
190 None,
191 _(b'ignore white space when comparing lines'),
191 _(b'ignore white space when comparing lines'),
192 ),
192 ),
193 (
193 (
194 b'b',
194 b'b',
195 b'ignore-space-change',
195 b'ignore-space-change',
196 None,
196 None,
197 _(b'ignore changes in the amount of white space'),
197 _(b'ignore changes in the amount of white space'),
198 ),
198 ),
199 (
199 (
200 b'B',
200 b'B',
201 b'ignore-blank-lines',
201 b'ignore-blank-lines',
202 None,
202 None,
203 _(b'ignore changes whose lines are all blank'),
203 _(b'ignore changes whose lines are all blank'),
204 ),
204 ),
205 (
205 (
206 b'Z',
206 b'Z',
207 b'ignore-space-at-eol',
207 b'ignore-space-at-eol',
208 None,
208 None,
209 _(b'ignore changes in whitespace at EOL'),
209 _(b'ignore changes in whitespace at EOL'),
210 ),
210 ),
211 ]
211 ]
212
212
213 diffopts2 = (
213 diffopts2 = (
214 [
214 [
215 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
215 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
216 (
216 (
217 b'p',
217 b'p',
218 b'show-function',
218 b'show-function',
219 None,
219 None,
220 _(
220 _(
221 b'show which function each change is in (DEFAULT: diff.showfunc)'
221 b'show which function each change is in (DEFAULT: diff.showfunc)'
222 ),
222 ),
223 ),
223 ),
224 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
224 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
225 ]
225 ]
226 + diffwsopts
226 + diffwsopts
227 + [
227 + [
228 (
228 (
229 b'U',
229 b'U',
230 b'unified',
230 b'unified',
231 b'',
231 b'',
232 _(b'number of lines of context to show'),
232 _(b'number of lines of context to show'),
233 _(b'NUM'),
233 _(b'NUM'),
234 ),
234 ),
235 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
235 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
236 (
236 (
237 b'',
237 b'',
238 b'root',
238 b'root',
239 b'',
239 b'',
240 _(b'produce diffs relative to subdirectory'),
240 _(b'produce diffs relative to subdirectory'),
241 _(b'DIR'),
241 _(b'DIR'),
242 ),
242 ),
243 ]
243 ]
244 )
244 )
245
245
246 mergetoolopts = [
246 mergetoolopts = [
247 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
247 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
248 ]
248 ]
249
249
250 similarityopts = [
250 similarityopts = [
251 (
251 (
252 b's',
252 b's',
253 b'similarity',
253 b'similarity',
254 b'',
254 b'',
255 _(b'guess renamed files by similarity (0<=s<=100)'),
255 _(b'guess renamed files by similarity (0<=s<=100)'),
256 _(b'SIMILARITY'),
256 _(b'SIMILARITY'),
257 )
257 )
258 ]
258 ]
259
259
260 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
260 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
261
261
262 debugrevlogopts = [
262 debugrevlogopts = [
263 (b'c', b'changelog', False, _(b'open changelog')),
263 (b'c', b'changelog', False, _(b'open changelog')),
264 (b'm', b'manifest', False, _(b'open manifest')),
264 (b'm', b'manifest', False, _(b'open manifest')),
265 (b'', b'dir', b'', _(b'open directory manifest')),
265 (b'', b'dir', b'', _(b'open directory manifest')),
266 ]
266 ]
267
267
268 # special string such that everything below this line will be ingored in the
268 # special string such that everything below this line will be ingored in the
269 # editor text
269 # editor text
270 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
270 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
271
271
272
272
273 def check_at_most_one_arg(opts, *args):
273 def check_at_most_one_arg(opts, *args):
274 """abort if more than one of the arguments are in opts
274 """abort if more than one of the arguments are in opts
275
275
276 Returns the unique argument or None if none of them were specified.
276 Returns the unique argument or None if none of them were specified.
277 """
277 """
278
278
279 def to_display(name):
279 def to_display(name):
280 return pycompat.sysbytes(name).replace(b'_', b'-')
280 return pycompat.sysbytes(name).replace(b'_', b'-')
281
281
282 previous = None
282 previous = None
283 for x in args:
283 for x in args:
284 if opts.get(x):
284 if opts.get(x):
285 if previous:
285 if previous:
286 raise error.InputError(
286 raise error.InputError(
287 _(b'cannot specify both --%s and --%s')
287 _(b'cannot specify both --%s and --%s')
288 % (to_display(previous), to_display(x))
288 % (to_display(previous), to_display(x))
289 )
289 )
290 previous = x
290 previous = x
291 return previous
291 return previous
292
292
293
293
294 def check_incompatible_arguments(opts, first, others):
294 def check_incompatible_arguments(opts, first, others):
295 """abort if the first argument is given along with any of the others
295 """abort if the first argument is given along with any of the others
296
296
297 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
297 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
298 among themselves, and they're passed as a single collection.
298 among themselves, and they're passed as a single collection.
299 """
299 """
300 for other in others:
300 for other in others:
301 check_at_most_one_arg(opts, first, other)
301 check_at_most_one_arg(opts, first, other)
302
302
303
303
304 def resolvecommitoptions(ui, opts):
304 def resolvecommitoptions(ui, opts):
305 """modify commit options dict to handle related options
305 """modify commit options dict to handle related options
306
306
307 The return value indicates that ``rewrite.update-timestamp`` is the reason
307 The return value indicates that ``rewrite.update-timestamp`` is the reason
308 the ``date`` option is set.
308 the ``date`` option is set.
309 """
309 """
310 check_at_most_one_arg(opts, b'date', b'currentdate')
310 check_at_most_one_arg(opts, b'date', b'currentdate')
311 check_at_most_one_arg(opts, b'user', b'currentuser')
311 check_at_most_one_arg(opts, b'user', b'currentuser')
312
312
313 datemaydiffer = False # date-only change should be ignored?
313 datemaydiffer = False # date-only change should be ignored?
314
314
315 if opts.get(b'currentdate'):
315 if opts.get(b'currentdate'):
316 opts[b'date'] = b'%d %d' % dateutil.makedate()
316 opts[b'date'] = b'%d %d' % dateutil.makedate()
317 elif (
317 elif (
318 not opts.get(b'date')
318 not opts.get(b'date')
319 and ui.configbool(b'rewrite', b'update-timestamp')
319 and ui.configbool(b'rewrite', b'update-timestamp')
320 and opts.get(b'currentdate') is None
320 and opts.get(b'currentdate') is None
321 ):
321 ):
322 opts[b'date'] = b'%d %d' % dateutil.makedate()
322 opts[b'date'] = b'%d %d' % dateutil.makedate()
323 datemaydiffer = True
323 datemaydiffer = True
324
324
325 if opts.get(b'currentuser'):
325 if opts.get(b'currentuser'):
326 opts[b'user'] = ui.username()
326 opts[b'user'] = ui.username()
327
327
328 return datemaydiffer
328 return datemaydiffer
329
329
330
330
331 def checknotesize(ui, opts):
331 def checknotesize(ui, opts):
332 """make sure note is of valid format"""
332 """make sure note is of valid format"""
333
333
334 note = opts.get(b'note')
334 note = opts.get(b'note')
335 if not note:
335 if not note:
336 return
336 return
337
337
338 if len(note) > 255:
338 if len(note) > 255:
339 raise error.InputError(_(b"cannot store a note of more than 255 bytes"))
339 raise error.InputError(_(b"cannot store a note of more than 255 bytes"))
340 if b'\n' in note:
340 if b'\n' in note:
341 raise error.InputError(_(b"note cannot contain a newline"))
341 raise error.InputError(_(b"note cannot contain a newline"))
342
342
343
343
344 def ishunk(x):
344 def ishunk(x):
345 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
345 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
346 return isinstance(x, hunkclasses)
346 return isinstance(x, hunkclasses)
347
347
348
348
349 def newandmodified(chunks, originalchunks):
349 def newandmodified(chunks, originalchunks):
350 newlyaddedandmodifiedfiles = set()
350 newlyaddedandmodifiedfiles = set()
351 alsorestore = set()
351 alsorestore = set()
352 for chunk in chunks:
352 for chunk in chunks:
353 if (
353 if (
354 ishunk(chunk)
354 ishunk(chunk)
355 and chunk.header.isnewfile()
355 and chunk.header.isnewfile()
356 and chunk not in originalchunks
356 and chunk not in originalchunks
357 ):
357 ):
358 newlyaddedandmodifiedfiles.add(chunk.header.filename())
358 newlyaddedandmodifiedfiles.add(chunk.header.filename())
359 alsorestore.update(
359 alsorestore.update(
360 set(chunk.header.files()) - {chunk.header.filename()}
360 set(chunk.header.files()) - {chunk.header.filename()}
361 )
361 )
362 return newlyaddedandmodifiedfiles, alsorestore
362 return newlyaddedandmodifiedfiles, alsorestore
363
363
364
364
365 def parsealiases(cmd):
365 def parsealiases(cmd):
366 base_aliases = cmd.split(b"|")
366 base_aliases = cmd.split(b"|")
367 all_aliases = set(base_aliases)
367 all_aliases = set(base_aliases)
368 extra_aliases = []
368 extra_aliases = []
369 for alias in base_aliases:
369 for alias in base_aliases:
370 if b'-' in alias:
370 if b'-' in alias:
371 folded_alias = alias.replace(b'-', b'')
371 folded_alias = alias.replace(b'-', b'')
372 if folded_alias not in all_aliases:
372 if folded_alias not in all_aliases:
373 all_aliases.add(folded_alias)
373 all_aliases.add(folded_alias)
374 extra_aliases.append(folded_alias)
374 extra_aliases.append(folded_alias)
375 base_aliases.extend(extra_aliases)
375 base_aliases.extend(extra_aliases)
376 return base_aliases
376 return base_aliases
377
377
378
378
379 def setupwrapcolorwrite(ui):
379 def setupwrapcolorwrite(ui):
380 # wrap ui.write so diff output can be labeled/colorized
380 # wrap ui.write so diff output can be labeled/colorized
381 def wrapwrite(orig, *args, **kw):
381 def wrapwrite(orig, *args, **kw):
382 label = kw.pop('label', b'')
382 label = kw.pop('label', b'')
383 for chunk, l in patch.difflabel(lambda: args):
383 for chunk, l in patch.difflabel(lambda: args):
384 orig(chunk, label=label + l)
384 orig(chunk, label=label + l)
385
385
386 oldwrite = ui.write
386 oldwrite = ui.write
387
387
388 def wrap(*args, **kwargs):
388 def wrap(*args, **kwargs):
389 return wrapwrite(oldwrite, *args, **kwargs)
389 return wrapwrite(oldwrite, *args, **kwargs)
390
390
391 setattr(ui, 'write', wrap)
391 setattr(ui, 'write', wrap)
392 return oldwrite
392 return oldwrite
393
393
394
394
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Let the user filter *originalhunks*, via curses when enabled.

    Falls back to the text-mode filter when curses is disabled or when the
    curses UI signals (via fallbackerror) that it cannot run.
    """
    if usecurses:
        try:
            if testfile:
                # replay scripted keystrokes from testfile (test support)
                hunkselector = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                hunkselector = crecordmod.chunkselector
            return crecordmod.filterpatch(
                ui, originalhunks, hunkselector, operation
            )
        except crecordmod.fallbackerror as e:
            ui.warn(b'%s\n' % e)
            ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
413
413
414
414
def recordfilter(ui, originalhunks, match, operation=None):
    """Prompt the user to filter *originalhunks*; return (chunks, opts).

    *operation* names what the user is doing (committing, reverting,
    shelving, ...) and is used to build the prompt messages (see
    patch.filterpatch).
    """
    wantcurses = crecordmod.checkcurses(ui)
    scriptfile = ui.config(b'experimental', b'crecordtest')
    # temporarily wrap ui.write so the displayed diff is colorized
    restorewrite = setupwrapcolorwrite(ui)
    try:
        selection = filterchunks(
            ui, originalhunks, wantcurses, scriptfile, match, operation
        )
    finally:
        ui.write = restorewrite
    return selection
432
432
433
433
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select changes and hand them to *commitfunc*.

    *cmdsuggest* is a command name suggested in the error message when the
    UI is not interactive; *backupall* backs up every changed file rather
    than only the files touched by the selection; *filterfn* is the
    hunk-selection UI (see recordfilter). Raises error.InputError when run
    non-interactively.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.InputError(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.InputError(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            # surface bad patterns / files as a hard input error
            raise error.InputError(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.InputError(_(b'error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(
            chunks, originalchunks
        )
        # every file touched by at least one selected hunk
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers without a files() method (e.g. plain text chunks)
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # an existing backup directory is fine; anything else is not
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=os.path.basename(f) + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # build the patch containing only the selected hunks
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.revert_to(repo[b'.'], matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.InputError(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in pycompat.iteritems(backups):
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == b'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best effort: never let restore failures mask the real result
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # hold the wlock for the whole interactive session
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
657
657
658
658
class dirnode(object):
    """A directory of the working copy, annotated for status tersing.

    Attributes:
      path     -- path to this directory, without a trailing '/'
      statuses -- set of status codes of every file anywhere below this
                  directory (subdirectories included)
      files    -- (name, status) pairs for direct child files
      subdirs  -- mapping of child directory name -> dirnode
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record *filename* as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """Record *filename* (possibly nested) below this directory.

        Intermediate dirnode objects are created as needed, and *status*
        is recorded on this directory (and, via recursion, on every
        directory along the path).
        """
        if b'/' not in filename:
            # no path separator: filename is a direct child
            self._addfileindir(filename, status)
        else:
            childname, remainder = filename.split(b'/', 1)

            child = self.subdirs.get(childname)
            if child is None:
                child = dirnode(pathutil.join(self.path, childname))
                self.subdirs[childname] = child

            child.addfile(remainder, status)

        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """Yield (status, path) pairs, collapsing uniform directories.

        *terseargs* is the string of status abbreviations the user passed
        with `--terse`. When every file below this directory shares one
        status and that status is in *terseargs*, a single
        (status, 'dirpath/') entry is yielded instead of the individual
        files; otherwise direct files are yielded followed by a recursive
        walk of each subdirectory.
        """
        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # terse only when that one status was requested by the user
            if onlyst in terseargs:
                yield onlyst, self.path + b'/'
                return

        # direct children of this directory
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # then recurse into every subdirectory
        for child in self.subdirs.values():
            for st, fpath in child.tersewalk(terseargs):
                yield st, fpath
758
758
759
759
def tersedir(statuslist, terseargs):
    """Collapse per-file statuses into per-directory entries where possible.

    *statuslist* is a scmutil.status() object holding a list of files for
    each status; *terseargs* is the string the user passed to `--terse`.
    A tree of dirnode objects is built, and any directory whose files all
    share a single requested status is reported as one entry.
    """
    # output order of the status groups; also used to validate terseargs
    statusorder = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # reject unknown status abbreviations up front
    for flag in pycompat.bytestr(terseargs):
        if flag not in statusorder:
            raise error.InputError(_(b"'%s' not recognized") % flag)

    # build the directory tree, rooted at the repository root
    root = dirnode(b'')
    attrnames = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    collected = {}
    for attrname in attrnames:
        statuschar = attrname[0:1]
        collected[statuschar] = []
        for path in getattr(statuslist, attrname):
            root.addfile(path, statuschar)

    # the root directory itself is never tersed; list its files as-is
    for st, fpath in root.iterfilepaths():
        collected[st].append(fpath)

    # terse each top-level subdirectory and gather the results
    for child in root.subdirs.values():
        for st, fpath in child.tersewalk(terseargs):
            collected[st].append(fpath)

    grouped = []
    for st in statusorder:
        collected[st].sort()
        grouped.append(collected[st])

    return scmutil.status(*grouped)
815
815
816
816
817 def _commentlines(raw):
817 def _commentlines(raw):
818 '''Surround lineswith a comment char and a new line'''
818 '''Surround lineswith a comment char and a new line'''
819 lines = raw.splitlines()
819 lines = raw.splitlines()
820 commentedlines = [b'# %s' % line for line in lines]
820 commentedlines = [b'# %s' % line for line in lines]
821 return b'\n'.join(commentedlines) + b'\n'
821 return b'\n'.join(commentedlines) + b'\n'
822
822
823
823
@attr.s(frozen=True)
class morestatus(object):
    """Extra information shown by `hg status` for an unfinished operation.

    Holds the unfinished-state description and the unresolved merge
    conflicts, and renders them through a formatter via formatfile() and
    formatfooter().
    """

    reporoot = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    # Paths already rendered through formatfile(). Must use a factory so
    # each instance gets its own set: `default=set()` creates ONE set that
    # is shared by every morestatus instance in the process, leaking
    # formatted paths between unrelated status invocations.
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Emit per-file morestatus data for *path* (the unresolved flag)."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the trailing summary describing the unfinished state."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
                )

    def _formatconflicts(self, fm):
        # Conflict reporting only applies while a merge is active.
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b' %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
901
901
902
902
def readmorestatus(repo):
    """Return a morestatus object if *repo* has unfinished state, else None."""
    opstate = statemod.getrepostate(repo)
    mstate = mergestatemod.mergestate.read(repo)
    merging = mstate.active()
    if not opstate and not merging:
        # nothing unfinished, nothing to report
        return None

    opname = opmsg = conflicts = None
    if opstate:
        opname, opmsg = opstate
    if merging:
        conflicts = sorted(mstate.unresolved())
    return morestatus(repo.root, opname, opmsg, merging, conflicts)
919
919
920
920
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry) for each matching command,
    plus the flat list of all known command aliases.

    Debug commands (or their aliases) are returned only if no normal
    command matches.
    """
    matches = {}
    debugmatches = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        candidates = [cmd]
    else:
        candidates = table.keys()

    allcmds = []
    for key in candidates:
        aliases = parsealiases(key)
        allcmds.extend(aliases)

        hit = None
        if cmd in aliases:
            hit = cmd
        elif not strict:
            # accept the first alias that cmd is a prefix of
            for alias in aliases:
                if alias.startswith(cmd):
                    hit = alias
                    break
        if hit is None:
            continue

        isdebug = aliases[0].startswith(b"debug") or hit.startswith(b"debug")
        if isdebug:
            debugmatches[hit] = (aliases, table[key])
        else:
            matches[hit] = (aliases, table[key])

    if not matches and debugmatches:
        matches = debugmatches

    return matches, allcmds
958
958
959
959
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # Exact match wins outright, even when other prefixes also matched.
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # Multiple prefix matches: report all candidates, sorted.
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        # Exactly one prefix match left.
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
975
975
976
976
def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of given revs to label

    Rewrites each changeset in ``revs`` as a new changeset on branch
    ``label`` (via memctx), records obsmarkers / strips via
    scmutil.cleanupnodes, and moves the working copy onto the rewritten
    parent when applicable.  Raises InputError for invalid selections and
    StateError (via bailifchanged) for a dirty working directory.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.InputError(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            # Multiple roots would mean rewriting disjoint stacks at once.
            raise error.InputError(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # Branches of the root's parents: moving onto one of those is
        # always allowed even if the branch name already exists.
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.InputError(
                _(b"a branch of the same name already exists")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.InputError(
                _(b"cannot change branch in middle of a stack")
            )

        # old node -> (new node,) mapping built as we rewrite.
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # Redefined each iteration so it closes over the current ctx.
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            # Record the pre-rewrite node so the change is traceable.
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1078
1078
1079
1079
def findrepo(p):
    """Walk upward from ``p`` looking for a directory containing ``.hg``.

    Returns the repository root as a bytes path, or None once the
    filesystem root is reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, b".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point: we hit the filesystem root.
            return None
        p = parent

    return p
1087
1087
1088
1088
def bailifchanged(repo, merge=True, hint=None):
    """Enforce the precondition that the working directory must be clean.

    'merge' can be set to False when a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to the Abort exception.
    """

    if merge and repo.dirstate.p2() != repo.nullid:
        raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)

    wstatus = repo.status()
    dirty = (
        wstatus.modified or wstatus.added or wstatus.removed or wstatus.deleted
    )
    if dirty:
        raise error.StateError(_(b'uncommitted changes'), hint=hint)

    # Recurse into subrepositories in deterministic order.
    wctx = repo[None]
    for sub in sorted(wctx.substate):
        wctx.sub(sub).bailifchanged(hint=hint)
1106
1106
1107
1107
def logmessage(ui, opts):
    """Return the commit message selected by the -m or -l option.

    -m/--message wins when given; otherwise -l/--logfile is read ('-'
    meaning stdin).  The two options are mutually exclusive.
    """

    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # Normalize line endings while reading from the log file.
        data = util.readfile(logfile)
        return b'\n'.join(data.splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1128
1128
1129
1129
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether merging is committed.

    Returns baseformname with '.merge' appended when it is a merge,
    '.normal' otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # A changeset with two parents is a merge.
        ismerge = len(ctxorbool.parents()) > 1

    suffix = b".merge" if ismerge else b".normal"
    return baseformname + suffix
1146
1146
1147
1147
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """Get the appropriate commit message editor, per the '--edit' option.

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing, but before
    the emptiness check.  It should return the actual text to be stored
    into history.  This allows changing the description before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line.  A 'HG: ' prefix and EOL
    are added automatically.

    'editform' is a dot-separated list of names distinguishing the purpose
    of the commit text editing.

    Returns 'commitforceeditor' regardless of 'edit' when 'finishdesc' or
    'extramsg' is given, because those are specific to usage in MQ.
    """
    if edit or finishdesc or extramsg:

        def forcededitor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forcededitor

    if editform:

        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)

        return formeditor

    return commiteditor
1178
1178
1179
1179
def _escapecommandtemplate(tmpl):
    """Escape literal string segments of ``tmpl`` for command-line use.

    Template syntax fragments pass through untouched; only outermost
    string chunks are escaped.
    """
    pieces = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        chunk = tmpl[start:end]
        if typ == b'string':
            chunk = stringutil.escapestr(chunk)
        pieces.append(chunk)
    return b''.join(pieces)
1188
1188
1189
1189
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    A '\' in the outermost string is not taken as an escape character
    because it is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        # Preserve empty/None templates unchanged.
        return tmpl
    templ = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return templ.renderdefault(props)
1207
1207
1208
1208
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset.

    Each props item must be a stringify-able value or a callable returning
    such a value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    # The changeset itself is always available; callers may add more.
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
1224
1224
1225
1225
def format_changeset_summary(ui, ctx, command=None, default_spec=None):
    """Format a changeset summary (one line).

    The template is resolved in order: the per-command
    ``command-templates.oneline-summary.<command>`` config, the generic
    ``command-templates.oneline-summary`` config, ``default_spec``, and
    finally a built-in fallback.
    """
    fallback = (
        b'{separate(" ", '
        b'label("oneline-summary.changeset", "{rev}:{node|short}")'
        b', '
        b'join(filter(namespaces % "{ifeq(namespace, "branches", "", join(names % "{label("oneline-summary.{namespace}", name)}", " "))}"), " ")'
        b')} '
        b'"{label("oneline-summary.desc", desc|firstline)}"'
    )
    spec = None
    if command:
        spec = ui.config(
            b'command-templates', b'oneline-summary.%s' % command, None
        )
    # Lazy or-chain: each fallback is only consulted when needed.
    spec = (
        spec
        or ui.config(b'command-templates', b'oneline-summary')
        or default_spec
        or fallback
    )
    rendered = rendertemplate(ctx, spec)
    return rendered.split(b'\n')[0]
1248
1248
1249
1249
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # Map each %-specifier to its template equivalent.  Only specifiers
    # whose data was supplied (total/seqno/pathname) are enabled below.
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # With both known, zero-pad the sequence number to total's width.
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    # Walk the pattern with the template scanner: template fragments are
    # passed through verbatim, while literal string chunks get their
    # %-specifiers expanded and the rest escaped.
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            # Find the next %-specifier within this string chunk.
            n = pat.find(b'%', i, end)
            if n < 0:
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # Trailing lone '%' with no specifier character.
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1319
1319
1320
1320
def makefilename(ctx, pat, **props):
    """Expand an old-style %-pattern or template filename against ``ctx``."""
    if not pat:
        # Preserve empty/None patterns unchanged.
        return pat
    fntemplate = _buildfntemplate(pat, **props)
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, fntemplate, pycompat.byteskwargs(props))
1329
1329
1330
1330
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    # An empty/None pattern or the conventional '-' selects stdio.
    if not pat:
        return True
    return pat == b'-'
1334
1334
1335
1335
1336 class _unclosablefile(object):
1336 class _unclosablefile(object):
1337 def __init__(self, fp):
1337 def __init__(self, fp):
1338 self._fp = fp
1338 self._fp = fp
1339
1339
1340 def close(self):
1340 def close(self):
1341 pass
1341 pass
1342
1342
1343 def __iter__(self):
1343 def __iter__(self):
1344 return iter(self._fp)
1344 return iter(self._fp)
1345
1345
1346 def __getattr__(self, attr):
1346 def __getattr__(self, attr):
1347 return getattr(self._fp, attr)
1347 return getattr(self._fp, attr)
1348
1348
1349 def __enter__(self):
1349 def __enter__(self):
1350 return self
1350 return self
1351
1351
1352 def __exit__(self, exc_type, exc_value, exc_tb):
1352 def __exit__(self, exc_type, exc_value, exc_tb):
1353 pass
1353 pass
1354
1354
1355
1355
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the file named by pattern ``pat``; '-' or empty means stdio."""
    if isstdiofilename(pat):
        # Stdio was requested: hand out the ui stream, wrapped so a
        # caller's close() cannot close the shared stream.
        ui = ctx.repo().ui
        writable = mode not in (b'r', b'rb')
        stream = ui.fout if writable else ui.fin
        return _unclosablefile(stream)
    return open(makefilename(ctx, pat, **props), mode)
1368
1368
1369
1369
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    The target is selected by the mutually-constrained 'changelog',
    'manifest' and 'dir' options, or by 'file_'.  Without a repository,
    'file_' may name a raw '.i' index file opened as a free-form revlog.
    When 'returnrevlog' is set, the result is always an actual revlog
    instance (unwrapping storage objects that carry a '_revlog').
    Raises InputError/CommandError on invalid option combinations.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    # Validate option combinations first; 'msg' carries the error, if any.
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.InputError(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if not scmutil.istreemanifest(repo):
                raise error.InputError(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            # Tree manifest storage is keyed by directory with a trailing '/'.
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            # Only use the filelog when it has at least one revision.
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.InputError(
                _(b'%r does not appear to be a revlog') % r
            )

    if not r:
        if not returnrevlog:
            raise error.InputError(_(b'cannot give path to non-revlog'))

        # Fall back to opening 'file_' directly as a raw revlog index.
        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.InputError(_(b"revlog '%s' not found") % file_)

        target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False),
            target=target,
            # Strip the '.i' suffix: the revlog is addressed by its radix.
            radix=file_[:-2],
        )
    return r
1443
1443
1444
1444
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    Identical to ``openstorage()``, except the result is always a revlog.

    In most cases a caller cares about the main storage object — not the
    revlog backing it.  This function should therefore only be used by
    code that needs to examine low-level revlog implementation details,
    e.g. debug commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1456
1456
1457
1457
1458 def copy(ui, repo, pats, opts, rename=False):
1458 def copy(ui, repo, pats, opts, rename=False):
1459 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1459 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1460
1460
1461 # called with the repo lock held
1461 # called with the repo lock held
1462 #
1462 #
1463 # hgsep => pathname that uses "/" to separate directories
1463 # hgsep => pathname that uses "/" to separate directories
1464 # ossep => pathname that uses os.sep to separate directories
1464 # ossep => pathname that uses os.sep to separate directories
1465 cwd = repo.getcwd()
1465 cwd = repo.getcwd()
1466 targets = {}
1466 targets = {}
1467 forget = opts.get(b"forget")
1467 forget = opts.get(b"forget")
1468 after = opts.get(b"after")
1468 after = opts.get(b"after")
1469 dryrun = opts.get(b"dry_run")
1469 dryrun = opts.get(b"dry_run")
1470 rev = opts.get(b'at_rev')
1470 rev = opts.get(b'at_rev')
1471 if rev:
1471 if rev:
1472 if not forget and not after:
1472 if not forget and not after:
1473 # TODO: Remove this restriction and make it also create the copy
1473 # TODO: Remove this restriction and make it also create the copy
1474 # targets (and remove the rename source if rename==True).
1474 # targets (and remove the rename source if rename==True).
1475 raise error.InputError(_(b'--at-rev requires --after'))
1475 raise error.InputError(_(b'--at-rev requires --after'))
1476 ctx = scmutil.revsingle(repo, rev)
1476 ctx = scmutil.revsingle(repo, rev)
1477 if len(ctx.parents()) > 1:
1477 if len(ctx.parents()) > 1:
1478 raise error.InputError(
1478 raise error.InputError(
1479 _(b'cannot mark/unmark copy in merge commit')
1479 _(b'cannot mark/unmark copy in merge commit')
1480 )
1480 )
1481 else:
1481 else:
1482 ctx = repo[None]
1482 ctx = repo[None]
1483
1483
1484 pctx = ctx.p1()
1484 pctx = ctx.p1()
1485
1485
1486 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1486 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1487
1487
1488 if forget:
1488 if forget:
1489 if ctx.rev() is None:
1489 if ctx.rev() is None:
1490 new_ctx = ctx
1490 new_ctx = ctx
1491 else:
1491 else:
1492 if len(ctx.parents()) > 1:
1492 if len(ctx.parents()) > 1:
1493 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1493 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1494 # avoid cycle context -> subrepo -> cmdutil
1494 # avoid cycle context -> subrepo -> cmdutil
1495 from . import context
1495 from . import context
1496
1496
1497 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1497 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1498 new_ctx = context.overlayworkingctx(repo)
1498 new_ctx = context.overlayworkingctx(repo)
1499 new_ctx.setbase(ctx.p1())
1499 new_ctx.setbase(ctx.p1())
1500 mergemod.graft(repo, ctx, wctx=new_ctx)
1500 mergemod.graft(repo, ctx, wctx=new_ctx)
1501
1501
1502 match = scmutil.match(ctx, pats, opts)
1502 match = scmutil.match(ctx, pats, opts)
1503
1503
1504 current_copies = ctx.p1copies()
1504 current_copies = ctx.p1copies()
1505 current_copies.update(ctx.p2copies())
1505 current_copies.update(ctx.p2copies())
1506
1506
1507 uipathfn = scmutil.getuipathfn(repo)
1507 uipathfn = scmutil.getuipathfn(repo)
1508 for f in ctx.walk(match):
1508 for f in ctx.walk(match):
1509 if f in current_copies:
1509 if f in current_copies:
1510 new_ctx[f].markcopied(None)
1510 new_ctx[f].markcopied(None)
1511 elif match.exact(f):
1511 elif match.exact(f):
1512 ui.warn(
1512 ui.warn(
1513 _(
1513 _(
1514 b'%s: not unmarking as copy - file is not marked as copied\n'
1514 b'%s: not unmarking as copy - file is not marked as copied\n'
1515 )
1515 )
1516 % uipathfn(f)
1516 % uipathfn(f)
1517 )
1517 )
1518
1518
1519 if ctx.rev() is not None:
1519 if ctx.rev() is not None:
1520 with repo.lock():
1520 with repo.lock():
1521 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1521 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1522 new_node = mem_ctx.commit()
1522 new_node = mem_ctx.commit()
1523
1523
1524 if repo.dirstate.p1() == ctx.node():
1524 if repo.dirstate.p1() == ctx.node():
1525 with repo.dirstate.parentchange():
1525 with repo.dirstate.parentchange():
1526 scmutil.movedirstate(repo, repo[new_node])
1526 scmutil.movedirstate(repo, repo[new_node])
1527 replacements = {ctx.node(): [new_node]}
1527 replacements = {ctx.node(): [new_node]}
1528 scmutil.cleanupnodes(
1528 scmutil.cleanupnodes(
1529 repo, replacements, b'uncopy', fixphase=True
1529 repo, replacements, b'uncopy', fixphase=True
1530 )
1530 )
1531
1531
1532 return
1532 return
1533
1533
1534 pats = scmutil.expandpats(pats)
1534 pats = scmutil.expandpats(pats)
1535 if not pats:
1535 if not pats:
1536 raise error.InputError(_(b'no source or destination specified'))
1536 raise error.InputError(_(b'no source or destination specified'))
1537 if len(pats) == 1:
1537 if len(pats) == 1:
1538 raise error.InputError(_(b'no destination specified'))
1538 raise error.InputError(_(b'no destination specified'))
1539 dest = pats.pop()
1539 dest = pats.pop()
1540
1540
1541 def walkpat(pat):
1541 def walkpat(pat):
1542 srcs = []
1542 srcs = []
1543 # TODO: Inline and simplify the non-working-copy version of this code
1543 # TODO: Inline and simplify the non-working-copy version of this code
1544 # since it shares very little with the working-copy version of it.
1544 # since it shares very little with the working-copy version of it.
1545 ctx_to_walk = ctx if ctx.rev() is None else pctx
1545 ctx_to_walk = ctx if ctx.rev() is None else pctx
1546 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1546 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1547 for abs in ctx_to_walk.walk(m):
1547 for abs in ctx_to_walk.walk(m):
1548 rel = uipathfn(abs)
1548 rel = uipathfn(abs)
1549 exact = m.exact(abs)
1549 exact = m.exact(abs)
1550 if abs not in ctx:
1550 if abs not in ctx:
1551 if abs in pctx:
1551 if abs in pctx:
1552 if not after:
1552 if not after:
1553 if exact:
1553 if exact:
1554 ui.warn(
1554 ui.warn(
1555 _(
1555 _(
1556 b'%s: not copying - file has been marked '
1556 b'%s: not copying - file has been marked '
1557 b'for remove\n'
1557 b'for remove\n'
1558 )
1558 )
1559 % rel
1559 % rel
1560 )
1560 )
1561 continue
1561 continue
1562 else:
1562 else:
1563 if exact:
1563 if exact:
1564 ui.warn(
1564 ui.warn(
1565 _(b'%s: not copying - file is not managed\n') % rel
1565 _(b'%s: not copying - file is not managed\n') % rel
1566 )
1566 )
1567 continue
1567 continue
1568
1568
1569 # abs: hgsep
1569 # abs: hgsep
1570 # rel: ossep
1570 # rel: ossep
1571 srcs.append((abs, rel, exact))
1571 srcs.append((abs, rel, exact))
1572 return srcs
1572 return srcs
1573
1573
1574 if ctx.rev() is not None:
1574 if ctx.rev() is not None:
1575 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1575 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1576 absdest = pathutil.canonpath(repo.root, cwd, dest)
1576 absdest = pathutil.canonpath(repo.root, cwd, dest)
1577 if ctx.hasdir(absdest):
1577 if ctx.hasdir(absdest):
1578 raise error.InputError(
1578 raise error.InputError(
1579 _(b'%s: --at-rev does not support a directory as destination')
1579 _(b'%s: --at-rev does not support a directory as destination')
1580 % uipathfn(absdest)
1580 % uipathfn(absdest)
1581 )
1581 )
1582 if absdest not in ctx:
1582 if absdest not in ctx:
1583 raise error.InputError(
1583 raise error.InputError(
1584 _(b'%s: copy destination does not exist in %s')
1584 _(b'%s: copy destination does not exist in %s')
1585 % (uipathfn(absdest), ctx)
1585 % (uipathfn(absdest), ctx)
1586 )
1586 )
1587
1587
1588 # avoid cycle context -> subrepo -> cmdutil
1588 # avoid cycle context -> subrepo -> cmdutil
1589 from . import context
1589 from . import context
1590
1590
1591 copylist = []
1591 copylist = []
1592 for pat in pats:
1592 for pat in pats:
1593 srcs = walkpat(pat)
1593 srcs = walkpat(pat)
1594 if not srcs:
1594 if not srcs:
1595 continue
1595 continue
1596 for abs, rel, exact in srcs:
1596 for abs, rel, exact in srcs:
1597 copylist.append(abs)
1597 copylist.append(abs)
1598
1598
1599 if not copylist:
1599 if not copylist:
1600 raise error.InputError(_(b'no files to copy'))
1600 raise error.InputError(_(b'no files to copy'))
1601 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1601 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1602 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1602 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1603 # existing functions below.
1603 # existing functions below.
1604 if len(copylist) != 1:
1604 if len(copylist) != 1:
1605 raise error.InputError(_(b'--at-rev requires a single source'))
1605 raise error.InputError(_(b'--at-rev requires a single source'))
1606
1606
1607 new_ctx = context.overlayworkingctx(repo)
1607 new_ctx = context.overlayworkingctx(repo)
1608 new_ctx.setbase(ctx.p1())
1608 new_ctx.setbase(ctx.p1())
1609 mergemod.graft(repo, ctx, wctx=new_ctx)
1609 mergemod.graft(repo, ctx, wctx=new_ctx)
1610
1610
1611 new_ctx.markcopied(absdest, copylist[0])
1611 new_ctx.markcopied(absdest, copylist[0])
1612
1612
1613 with repo.lock():
1613 with repo.lock():
1614 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1614 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1615 new_node = mem_ctx.commit()
1615 new_node = mem_ctx.commit()
1616
1616
1617 if repo.dirstate.p1() == ctx.node():
1617 if repo.dirstate.p1() == ctx.node():
1618 with repo.dirstate.parentchange():
1618 with repo.dirstate.parentchange():
1619 scmutil.movedirstate(repo, repo[new_node])
1619 scmutil.movedirstate(repo, repo[new_node])
1620 replacements = {ctx.node(): [new_node]}
1620 replacements = {ctx.node(): [new_node]}
1621 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1621 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1622
1622
1623 return
1623 return
1624
1624
1625 # abssrc: hgsep
1625 # abssrc: hgsep
1626 # relsrc: ossep
1626 # relsrc: ossep
1627 # otarget: ossep
1627 # otarget: ossep
1628 def copyfile(abssrc, relsrc, otarget, exact):
1628 def copyfile(abssrc, relsrc, otarget, exact):
1629 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1629 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1630 if b'/' in abstarget:
1630 if b'/' in abstarget:
1631 # We cannot normalize abstarget itself, this would prevent
1631 # We cannot normalize abstarget itself, this would prevent
1632 # case only renames, like a => A.
1632 # case only renames, like a => A.
1633 abspath, absname = abstarget.rsplit(b'/', 1)
1633 abspath, absname = abstarget.rsplit(b'/', 1)
1634 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1634 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1635 reltarget = repo.pathto(abstarget, cwd)
1635 reltarget = repo.pathto(abstarget, cwd)
1636 target = repo.wjoin(abstarget)
1636 target = repo.wjoin(abstarget)
1637 src = repo.wjoin(abssrc)
1637 src = repo.wjoin(abssrc)
1638 state = repo.dirstate[abstarget]
1638 state = repo.dirstate[abstarget]
1639
1639
1640 scmutil.checkportable(ui, abstarget)
1640 scmutil.checkportable(ui, abstarget)
1641
1641
1642 # check for collisions
1642 # check for collisions
1643 prevsrc = targets.get(abstarget)
1643 prevsrc = targets.get(abstarget)
1644 if prevsrc is not None:
1644 if prevsrc is not None:
1645 ui.warn(
1645 ui.warn(
1646 _(b'%s: not overwriting - %s collides with %s\n')
1646 _(b'%s: not overwriting - %s collides with %s\n')
1647 % (
1647 % (
1648 reltarget,
1648 reltarget,
1649 repo.pathto(abssrc, cwd),
1649 repo.pathto(abssrc, cwd),
1650 repo.pathto(prevsrc, cwd),
1650 repo.pathto(prevsrc, cwd),
1651 )
1651 )
1652 )
1652 )
1653 return True # report a failure
1653 return True # report a failure
1654
1654
1655 # check for overwrites
1655 # check for overwrites
1656 exists = os.path.lexists(target)
1656 exists = os.path.lexists(target)
1657 samefile = False
1657 samefile = False
1658 if exists and abssrc != abstarget:
1658 if exists and abssrc != abstarget:
1659 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1659 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1660 abstarget
1660 abstarget
1661 ):
1661 ):
1662 if not rename:
1662 if not rename:
1663 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1663 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1664 return True # report a failure
1664 return True # report a failure
1665 exists = False
1665 exists = False
1666 samefile = True
1666 samefile = True
1667
1667
1668 if not after and exists or after and state in b'mn':
1668 if not after and exists or after and state in b'mn':
1669 if not opts[b'force']:
1669 if not opts[b'force']:
1670 if state in b'mn':
1670 if state in b'mn':
1671 msg = _(b'%s: not overwriting - file already committed\n')
1671 msg = _(b'%s: not overwriting - file already committed\n')
1672 if after:
1672 if after:
1673 flags = b'--after --force'
1673 flags = b'--after --force'
1674 else:
1674 else:
1675 flags = b'--force'
1675 flags = b'--force'
1676 if rename:
1676 if rename:
1677 hint = (
1677 hint = (
1678 _(
1678 _(
1679 b"('hg rename %s' to replace the file by "
1679 b"('hg rename %s' to replace the file by "
1680 b'recording a rename)\n'
1680 b'recording a rename)\n'
1681 )
1681 )
1682 % flags
1682 % flags
1683 )
1683 )
1684 else:
1684 else:
1685 hint = (
1685 hint = (
1686 _(
1686 _(
1687 b"('hg copy %s' to replace the file by "
1687 b"('hg copy %s' to replace the file by "
1688 b'recording a copy)\n'
1688 b'recording a copy)\n'
1689 )
1689 )
1690 % flags
1690 % flags
1691 )
1691 )
1692 else:
1692 else:
1693 msg = _(b'%s: not overwriting - file exists\n')
1693 msg = _(b'%s: not overwriting - file exists\n')
1694 if rename:
1694 if rename:
1695 hint = _(
1695 hint = _(
1696 b"('hg rename --after' to record the rename)\n"
1696 b"('hg rename --after' to record the rename)\n"
1697 )
1697 )
1698 else:
1698 else:
1699 hint = _(b"('hg copy --after' to record the copy)\n")
1699 hint = _(b"('hg copy --after' to record the copy)\n")
1700 ui.warn(msg % reltarget)
1700 ui.warn(msg % reltarget)
1701 ui.warn(hint)
1701 ui.warn(hint)
1702 return True # report a failure
1702 return True # report a failure
1703
1703
1704 if after:
1704 if after:
1705 if not exists:
1705 if not exists:
1706 if rename:
1706 if rename:
1707 ui.warn(
1707 ui.warn(
1708 _(b'%s: not recording move - %s does not exist\n')
1708 _(b'%s: not recording move - %s does not exist\n')
1709 % (relsrc, reltarget)
1709 % (relsrc, reltarget)
1710 )
1710 )
1711 else:
1711 else:
1712 ui.warn(
1712 ui.warn(
1713 _(b'%s: not recording copy - %s does not exist\n')
1713 _(b'%s: not recording copy - %s does not exist\n')
1714 % (relsrc, reltarget)
1714 % (relsrc, reltarget)
1715 )
1715 )
1716 return True # report a failure
1716 return True # report a failure
1717 elif not dryrun:
1717 elif not dryrun:
1718 try:
1718 try:
1719 if exists:
1719 if exists:
1720 os.unlink(target)
1720 os.unlink(target)
1721 targetdir = os.path.dirname(target) or b'.'
1721 targetdir = os.path.dirname(target) or b'.'
1722 if not os.path.isdir(targetdir):
1722 if not os.path.isdir(targetdir):
1723 os.makedirs(targetdir)
1723 os.makedirs(targetdir)
1724 if samefile:
1724 if samefile:
1725 tmp = target + b"~hgrename"
1725 tmp = target + b"~hgrename"
1726 os.rename(src, tmp)
1726 os.rename(src, tmp)
1727 os.rename(tmp, target)
1727 os.rename(tmp, target)
1728 else:
1728 else:
1729 # Preserve stat info on renames, not on copies; this matches
1729 # Preserve stat info on renames, not on copies; this matches
1730 # Linux CLI behavior.
1730 # Linux CLI behavior.
1731 util.copyfile(src, target, copystat=rename)
1731 util.copyfile(src, target, copystat=rename)
1732 srcexists = True
1732 srcexists = True
1733 except IOError as inst:
1733 except IOError as inst:
1734 if inst.errno == errno.ENOENT:
1734 if inst.errno == errno.ENOENT:
1735 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1735 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1736 srcexists = False
1736 srcexists = False
1737 else:
1737 else:
1738 ui.warn(
1738 ui.warn(
1739 _(b'%s: cannot copy - %s\n')
1739 _(b'%s: cannot copy - %s\n')
1740 % (relsrc, encoding.strtolocal(inst.strerror))
1740 % (relsrc, encoding.strtolocal(inst.strerror))
1741 )
1741 )
1742 return True # report a failure
1742 return True # report a failure
1743
1743
1744 if ui.verbose or not exact:
1744 if ui.verbose or not exact:
1745 if rename:
1745 if rename:
1746 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1746 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1747 else:
1747 else:
1748 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1748 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1749
1749
1750 targets[abstarget] = abssrc
1750 targets[abstarget] = abssrc
1751
1751
1752 # fix up dirstate
1752 # fix up dirstate
1753 scmutil.dirstatecopy(
1753 scmutil.dirstatecopy(
1754 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1754 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1755 )
1755 )
1756 if rename and not dryrun:
1756 if rename and not dryrun:
1757 if not after and srcexists and not samefile:
1757 if not after and srcexists and not samefile:
1758 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1758 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1759 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1759 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1760 ctx.forget([abssrc])
1760 ctx.forget([abssrc])
1761
1761
1762 # pat: ossep
1762 # pat: ossep
1763 # dest ossep
1763 # dest ossep
1764 # srcs: list of (hgsep, hgsep, ossep, bool)
1764 # srcs: list of (hgsep, hgsep, ossep, bool)
1765 # return: function that takes hgsep and returns ossep
1765 # return: function that takes hgsep and returns ossep
1766 def targetpathfn(pat, dest, srcs):
1766 def targetpathfn(pat, dest, srcs):
1767 if os.path.isdir(pat):
1767 if os.path.isdir(pat):
1768 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1768 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1769 abspfx = util.localpath(abspfx)
1769 abspfx = util.localpath(abspfx)
1770 if destdirexists:
1770 if destdirexists:
1771 striplen = len(os.path.split(abspfx)[0])
1771 striplen = len(os.path.split(abspfx)[0])
1772 else:
1772 else:
1773 striplen = len(abspfx)
1773 striplen = len(abspfx)
1774 if striplen:
1774 if striplen:
1775 striplen += len(pycompat.ossep)
1775 striplen += len(pycompat.ossep)
1776 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1776 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1777 elif destdirexists:
1777 elif destdirexists:
1778 res = lambda p: os.path.join(
1778 res = lambda p: os.path.join(
1779 dest, os.path.basename(util.localpath(p))
1779 dest, os.path.basename(util.localpath(p))
1780 )
1780 )
1781 else:
1781 else:
1782 res = lambda p: dest
1782 res = lambda p: dest
1783 return res
1783 return res
1784
1784
1785 # pat: ossep
1785 # pat: ossep
1786 # dest ossep
1786 # dest ossep
1787 # srcs: list of (hgsep, hgsep, ossep, bool)
1787 # srcs: list of (hgsep, hgsep, ossep, bool)
1788 # return: function that takes hgsep and returns ossep
1788 # return: function that takes hgsep and returns ossep
1789 def targetpathafterfn(pat, dest, srcs):
1789 def targetpathafterfn(pat, dest, srcs):
1790 if matchmod.patkind(pat):
1790 if matchmod.patkind(pat):
1791 # a mercurial pattern
1791 # a mercurial pattern
1792 res = lambda p: os.path.join(
1792 res = lambda p: os.path.join(
1793 dest, os.path.basename(util.localpath(p))
1793 dest, os.path.basename(util.localpath(p))
1794 )
1794 )
1795 else:
1795 else:
1796 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1796 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1797 if len(abspfx) < len(srcs[0][0]):
1797 if len(abspfx) < len(srcs[0][0]):
1798 # A directory. Either the target path contains the last
1798 # A directory. Either the target path contains the last
1799 # component of the source path or it does not.
1799 # component of the source path or it does not.
1800 def evalpath(striplen):
1800 def evalpath(striplen):
1801 score = 0
1801 score = 0
1802 for s in srcs:
1802 for s in srcs:
1803 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1803 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1804 if os.path.lexists(t):
1804 if os.path.lexists(t):
1805 score += 1
1805 score += 1
1806 return score
1806 return score
1807
1807
1808 abspfx = util.localpath(abspfx)
1808 abspfx = util.localpath(abspfx)
1809 striplen = len(abspfx)
1809 striplen = len(abspfx)
1810 if striplen:
1810 if striplen:
1811 striplen += len(pycompat.ossep)
1811 striplen += len(pycompat.ossep)
1812 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1812 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1813 score = evalpath(striplen)
1813 score = evalpath(striplen)
1814 striplen1 = len(os.path.split(abspfx)[0])
1814 striplen1 = len(os.path.split(abspfx)[0])
1815 if striplen1:
1815 if striplen1:
1816 striplen1 += len(pycompat.ossep)
1816 striplen1 += len(pycompat.ossep)
1817 if evalpath(striplen1) > score:
1817 if evalpath(striplen1) > score:
1818 striplen = striplen1
1818 striplen = striplen1
1819 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1819 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1820 else:
1820 else:
1821 # a file
1821 # a file
1822 if destdirexists:
1822 if destdirexists:
1823 res = lambda p: os.path.join(
1823 res = lambda p: os.path.join(
1824 dest, os.path.basename(util.localpath(p))
1824 dest, os.path.basename(util.localpath(p))
1825 )
1825 )
1826 else:
1826 else:
1827 res = lambda p: dest
1827 res = lambda p: dest
1828 return res
1828 return res
1829
1829
1830 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1830 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1831 if not destdirexists:
1831 if not destdirexists:
1832 if len(pats) > 1 or matchmod.patkind(pats[0]):
1832 if len(pats) > 1 or matchmod.patkind(pats[0]):
1833 raise error.InputError(
1833 raise error.InputError(
1834 _(
1834 _(
1835 b'with multiple sources, destination must be an '
1835 b'with multiple sources, destination must be an '
1836 b'existing directory'
1836 b'existing directory'
1837 )
1837 )
1838 )
1838 )
1839 if util.endswithsep(dest):
1839 if util.endswithsep(dest):
1840 raise error.InputError(
1840 raise error.InputError(
1841 _(b'destination %s is not a directory') % dest
1841 _(b'destination %s is not a directory') % dest
1842 )
1842 )
1843
1843
1844 tfn = targetpathfn
1844 tfn = targetpathfn
1845 if after:
1845 if after:
1846 tfn = targetpathafterfn
1846 tfn = targetpathafterfn
1847 copylist = []
1847 copylist = []
1848 for pat in pats:
1848 for pat in pats:
1849 srcs = walkpat(pat)
1849 srcs = walkpat(pat)
1850 if not srcs:
1850 if not srcs:
1851 continue
1851 continue
1852 copylist.append((tfn(pat, dest, srcs), srcs))
1852 copylist.append((tfn(pat, dest, srcs), srcs))
1853 if not copylist:
1853 if not copylist:
1854 hint = None
1854 hint = None
1855 if rename:
1855 if rename:
1856 hint = _(b'maybe you meant to use --after --at-rev=.')
1856 hint = _(b'maybe you meant to use --after --at-rev=.')
1857 raise error.InputError(_(b'no files to copy'), hint=hint)
1857 raise error.InputError(_(b'no files to copy'), hint=hint)
1858
1858
1859 errors = 0
1859 errors = 0
1860 for targetpath, srcs in copylist:
1860 for targetpath, srcs in copylist:
1861 for abssrc, relsrc, exact in srcs:
1861 for abssrc, relsrc, exact in srcs:
1862 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1862 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1863 errors += 1
1863 errors += 1
1864
1864
1865 return errors != 0
1865 return errors != 0
1866
1866
1867
1867
1868 ## facility to let extension process additional data into an import patch
1868 ## facility to let extension process additional data into an import patch
1869 # list of identifier to be executed in order
1869 # list of identifier to be executed in order
1870 extrapreimport = [] # run before commit
1870 extrapreimport = [] # run before commit
1871 extrapostimport = [] # run after commit
1871 extrapostimport = [] # run after commit
1872 # mapping from identifier to actual import function
1872 # mapping from identifier to actual import function
1873 #
1873 #
1874 # 'preimport' are run before the commit is made and are provided the following
1874 # 'preimport' are run before the commit is made and are provided the following
1875 # arguments:
1875 # arguments:
1876 # - repo: the localrepository instance,
1876 # - repo: the localrepository instance,
1877 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1877 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1878 # - extra: the future extra dictionary of the changeset, please mutate it,
1878 # - extra: the future extra dictionary of the changeset, please mutate it,
1879 # - opts: the import options.
1879 # - opts: the import options.
1880 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
1880 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
1881 # mutation of in memory commit and more. Feel free to rework the code to get
1881 # mutation of in memory commit and more. Feel free to rework the code to get
1882 # there.
1882 # there.
1883 extrapreimportmap = {}
1883 extrapreimportmap = {}
1884 # 'postimport' are run after the commit is made and are provided the following
1884 # 'postimport' are run after the commit is made and are provided the following
1885 # argument:
1885 # argument:
1886 # - ctx: the changectx created by import.
1886 # - ctx: the changectx created by import.
1887 extrapostimportmap = {}
1887 extrapostimportmap = {}
1888
1888
1889
1889
1890 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
1890 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
1891 """Utility function used by commands.import to import a single patch
1891 """Utility function used by commands.import to import a single patch
1892
1892
1893 This function is explicitly defined here to help the evolve extension to
1893 This function is explicitly defined here to help the evolve extension to
1894 wrap this part of the import logic.
1894 wrap this part of the import logic.
1895
1895
1896 The API is currently a bit ugly because it a simple code translation from
1896 The API is currently a bit ugly because it a simple code translation from
1897 the import command. Feel free to make it better.
1897 the import command. Feel free to make it better.
1898
1898
1899 :patchdata: a dictionary containing parsed patch data (such as from
1899 :patchdata: a dictionary containing parsed patch data (such as from
1900 ``patch.extract()``)
1900 ``patch.extract()``)
1901 :parents: nodes that will be parent of the created commit
1901 :parents: nodes that will be parent of the created commit
1902 :opts: the full dict of option passed to the import command
1902 :opts: the full dict of option passed to the import command
1903 :msgs: list to save commit message to.
1903 :msgs: list to save commit message to.
1904 (used in case we need to save it when failing)
1904 (used in case we need to save it when failing)
1905 :updatefunc: a function that update a repo to a given node
1905 :updatefunc: a function that update a repo to a given node
1906 updatefunc(<repo>, <node>)
1906 updatefunc(<repo>, <node>)
1907 """
1907 """
1908 # avoid cycle context -> subrepo -> cmdutil
1908 # avoid cycle context -> subrepo -> cmdutil
1909 from . import context
1909 from . import context
1910
1910
1911 tmpname = patchdata.get(b'filename')
1911 tmpname = patchdata.get(b'filename')
1912 message = patchdata.get(b'message')
1912 message = patchdata.get(b'message')
1913 user = opts.get(b'user') or patchdata.get(b'user')
1913 user = opts.get(b'user') or patchdata.get(b'user')
1914 date = opts.get(b'date') or patchdata.get(b'date')
1914 date = opts.get(b'date') or patchdata.get(b'date')
1915 branch = patchdata.get(b'branch')
1915 branch = patchdata.get(b'branch')
1916 nodeid = patchdata.get(b'nodeid')
1916 nodeid = patchdata.get(b'nodeid')
1917 p1 = patchdata.get(b'p1')
1917 p1 = patchdata.get(b'p1')
1918 p2 = patchdata.get(b'p2')
1918 p2 = patchdata.get(b'p2')
1919
1919
1920 nocommit = opts.get(b'no_commit')
1920 nocommit = opts.get(b'no_commit')
1921 importbranch = opts.get(b'import_branch')
1921 importbranch = opts.get(b'import_branch')
1922 update = not opts.get(b'bypass')
1922 update = not opts.get(b'bypass')
1923 strip = opts[b"strip"]
1923 strip = opts[b"strip"]
1924 prefix = opts[b"prefix"]
1924 prefix = opts[b"prefix"]
1925 sim = float(opts.get(b'similarity') or 0)
1925 sim = float(opts.get(b'similarity') or 0)
1926
1926
1927 if not tmpname:
1927 if not tmpname:
1928 return None, None, False
1928 return None, None, False
1929
1929
1930 rejects = False
1930 rejects = False
1931
1931
1932 cmdline_message = logmessage(ui, opts)
1932 cmdline_message = logmessage(ui, opts)
1933 if cmdline_message:
1933 if cmdline_message:
1934 # pickup the cmdline msg
1934 # pickup the cmdline msg
1935 message = cmdline_message
1935 message = cmdline_message
1936 elif message:
1936 elif message:
1937 # pickup the patch msg
1937 # pickup the patch msg
1938 message = message.strip()
1938 message = message.strip()
1939 else:
1939 else:
1940 # launch the editor
1940 # launch the editor
1941 message = None
1941 message = None
1942 ui.debug(b'message:\n%s\n' % (message or b''))
1942 ui.debug(b'message:\n%s\n' % (message or b''))
1943
1943
1944 if len(parents) == 1:
1944 if len(parents) == 1:
1945 parents.append(repo[nullrev])
1945 parents.append(repo[nullrev])
1946 if opts.get(b'exact'):
1946 if opts.get(b'exact'):
1947 if not nodeid or not p1:
1947 if not nodeid or not p1:
1948 raise error.InputError(_(b'not a Mercurial patch'))
1948 raise error.InputError(_(b'not a Mercurial patch'))
1949 p1 = repo[p1]
1949 p1 = repo[p1]
1950 p2 = repo[p2 or nullrev]
1950 p2 = repo[p2 or nullrev]
1951 elif p2:
1951 elif p2:
1952 try:
1952 try:
1953 p1 = repo[p1]
1953 p1 = repo[p1]
1954 p2 = repo[p2]
1954 p2 = repo[p2]
1955 # Without any options, consider p2 only if the
1955 # Without any options, consider p2 only if the
1956 # patch is being applied on top of the recorded
1956 # patch is being applied on top of the recorded
1957 # first parent.
1957 # first parent.
1958 if p1 != parents[0]:
1958 if p1 != parents[0]:
1959 p1 = parents[0]
1959 p1 = parents[0]
1960 p2 = repo[nullrev]
1960 p2 = repo[nullrev]
1961 except error.RepoError:
1961 except error.RepoError:
1962 p1, p2 = parents
1962 p1, p2 = parents
1963 if p2.rev() == nullrev:
1963 if p2.rev() == nullrev:
1964 ui.warn(
1964 ui.warn(
1965 _(
1965 _(
1966 b"warning: import the patch as a normal revision\n"
1966 b"warning: import the patch as a normal revision\n"
1967 b"(use --exact to import the patch as a merge)\n"
1967 b"(use --exact to import the patch as a merge)\n"
1968 )
1968 )
1969 )
1969 )
1970 else:
1970 else:
1971 p1, p2 = parents
1971 p1, p2 = parents
1972
1972
1973 n = None
1973 n = None
1974 if update:
1974 if update:
1975 if p1 != parents[0]:
1975 if p1 != parents[0]:
1976 updatefunc(repo, p1.node())
1976 updatefunc(repo, p1.node())
1977 if p2 != parents[1]:
1977 if p2 != parents[1]:
1978 repo.setparents(p1.node(), p2.node())
1978 repo.setparents(p1.node(), p2.node())
1979
1979
1980 if opts.get(b'exact') or importbranch:
1980 if opts.get(b'exact') or importbranch:
1981 repo.dirstate.setbranch(branch or b'default')
1981 repo.dirstate.setbranch(branch or b'default')
1982
1982
1983 partial = opts.get(b'partial', False)
1983 partial = opts.get(b'partial', False)
1984 files = set()
1984 files = set()
1985 try:
1985 try:
1986 patch.patch(
1986 patch.patch(
1987 ui,
1987 ui,
1988 repo,
1988 repo,
1989 tmpname,
1989 tmpname,
1990 strip=strip,
1990 strip=strip,
1991 prefix=prefix,
1991 prefix=prefix,
1992 files=files,
1992 files=files,
1993 eolmode=None,
1993 eolmode=None,
1994 similarity=sim / 100.0,
1994 similarity=sim / 100.0,
1995 )
1995 )
1996 except error.PatchError as e:
1996 except error.PatchError as e:
1997 if not partial:
1997 if not partial:
1998 raise error.Abort(pycompat.bytestr(e))
1998 raise error.Abort(pycompat.bytestr(e))
1999 if partial:
1999 if partial:
2000 rejects = True
2000 rejects = True
2001
2001
2002 files = list(files)
2002 files = list(files)
2003 if nocommit:
2003 if nocommit:
2004 if message:
2004 if message:
2005 msgs.append(message)
2005 msgs.append(message)
2006 else:
2006 else:
2007 if opts.get(b'exact') or p2:
2007 if opts.get(b'exact') or p2:
2008 # If you got here, you either use --force and know what
2008 # If you got here, you either use --force and know what
2009 # you are doing or used --exact or a merge patch while
2009 # you are doing or used --exact or a merge patch while
2010 # being updated to its first parent.
2010 # being updated to its first parent.
2011 m = None
2011 m = None
2012 else:
2012 else:
2013 m = scmutil.matchfiles(repo, files or [])
2013 m = scmutil.matchfiles(repo, files or [])
2014 editform = mergeeditform(repo[None], b'import.normal')
2014 editform = mergeeditform(repo[None], b'import.normal')
2015 if opts.get(b'exact'):
2015 if opts.get(b'exact'):
2016 editor = None
2016 editor = None
2017 else:
2017 else:
2018 editor = getcommiteditor(
2018 editor = getcommiteditor(
2019 editform=editform, **pycompat.strkwargs(opts)
2019 editform=editform, **pycompat.strkwargs(opts)
2020 )
2020 )
2021 extra = {}
2021 extra = {}
2022 for idfunc in extrapreimport:
2022 for idfunc in extrapreimport:
2023 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
2023 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
2024 overrides = {}
2024 overrides = {}
2025 if partial:
2025 if partial:
2026 overrides[(b'ui', b'allowemptycommit')] = True
2026 overrides[(b'ui', b'allowemptycommit')] = True
2027 if opts.get(b'secret'):
2027 if opts.get(b'secret'):
2028 overrides[(b'phases', b'new-commit')] = b'secret'
2028 overrides[(b'phases', b'new-commit')] = b'secret'
2029 with repo.ui.configoverride(overrides, b'import'):
2029 with repo.ui.configoverride(overrides, b'import'):
2030 n = repo.commit(
2030 n = repo.commit(
2031 message, user, date, match=m, editor=editor, extra=extra
2031 message, user, date, match=m, editor=editor, extra=extra
2032 )
2032 )
2033 for idfunc in extrapostimport:
2033 for idfunc in extrapostimport:
2034 extrapostimportmap[idfunc](repo[n])
2034 extrapostimportmap[idfunc](repo[n])
2035 else:
2035 else:
2036 if opts.get(b'exact') or importbranch:
2036 if opts.get(b'exact') or importbranch:
2037 branch = branch or b'default'
2037 branch = branch or b'default'
2038 else:
2038 else:
2039 branch = p1.branch()
2039 branch = p1.branch()
2040 store = patch.filestore()
2040 store = patch.filestore()
2041 try:
2041 try:
2042 files = set()
2042 files = set()
2043 try:
2043 try:
2044 patch.patchrepo(
2044 patch.patchrepo(
2045 ui,
2045 ui,
2046 repo,
2046 repo,
2047 p1,
2047 p1,
2048 store,
2048 store,
2049 tmpname,
2049 tmpname,
2050 strip,
2050 strip,
2051 prefix,
2051 prefix,
2052 files,
2052 files,
2053 eolmode=None,
2053 eolmode=None,
2054 )
2054 )
2055 except error.PatchError as e:
2055 except error.PatchError as e:
2056 raise error.Abort(stringutil.forcebytestr(e))
2056 raise error.Abort(stringutil.forcebytestr(e))
2057 if opts.get(b'exact'):
2057 if opts.get(b'exact'):
2058 editor = None
2058 editor = None
2059 else:
2059 else:
2060 editor = getcommiteditor(editform=b'import.bypass')
2060 editor = getcommiteditor(editform=b'import.bypass')
2061 memctx = context.memctx(
2061 memctx = context.memctx(
2062 repo,
2062 repo,
2063 (p1.node(), p2.node()),
2063 (p1.node(), p2.node()),
2064 message,
2064 message,
2065 files=files,
2065 files=files,
2066 filectxfn=store,
2066 filectxfn=store,
2067 user=user,
2067 user=user,
2068 date=date,
2068 date=date,
2069 branch=branch,
2069 branch=branch,
2070 editor=editor,
2070 editor=editor,
2071 )
2071 )
2072
2072
2073 overrides = {}
2073 overrides = {}
2074 if opts.get(b'secret'):
2074 if opts.get(b'secret'):
2075 overrides[(b'phases', b'new-commit')] = b'secret'
2075 overrides[(b'phases', b'new-commit')] = b'secret'
2076 with repo.ui.configoverride(overrides, b'import'):
2076 with repo.ui.configoverride(overrides, b'import'):
2077 n = memctx.commit()
2077 n = memctx.commit()
2078 finally:
2078 finally:
2079 store.close()
2079 store.close()
2080 if opts.get(b'exact') and nocommit:
2080 if opts.get(b'exact') and nocommit:
2081 # --exact with --no-commit is still useful in that it does merge
2081 # --exact with --no-commit is still useful in that it does merge
2082 # and branch bits
2082 # and branch bits
2083 ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
2083 ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
2084 elif opts.get(b'exact') and (not n or hex(n) != nodeid):
2084 elif opts.get(b'exact') and (not n or hex(n) != nodeid):
2085 raise error.Abort(_(b'patch is damaged or loses information'))
2085 raise error.Abort(_(b'patch is damaged or loses information'))
2086 msg = _(b'applied to working directory')
2086 msg = _(b'applied to working directory')
2087 if n:
2087 if n:
2088 # i18n: refers to a short changeset id
2088 # i18n: refers to a short changeset id
2089 msg = _(b'created %s') % short(n)
2089 msg = _(b'created %s') % short(n)
2090 return msg, n, rejects
2090 return msg, n, rejects
2091
2091
2092
2092
# Facility to let extensions include additional data in an exported patch.
# List of identifiers to be executed in order.
extraexport = []
# Mapping from identifier to actual export function.
# Each function has to return a string to be added to the header, or None.
# It is given two arguments (sequencenumber, changectx).
extraexportmap = {}
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one changeset as an "HG changeset patch" through formatter *fm*.

    Writes the patch header (user, date, branch, node, parents, plus any
    extension-registered header lines), the description, and finally the
    diff against the first parent — or the second one when *switch_parent*
    is set and the changeset is a merge.
    """
    binnode = scmutil.binnode(ctx)
    realparents = [p.node() for p in ctx.parents() if p]
    branchname = ctx.branch()
    if switch_parent:
        realparents.reverse()

    # Diff base: first (possibly swapped) parent, or null for root changesets.
    base = realparents[0] if realparents else repo.nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branchname and branchname != b'default',
        b'branch',
        b'# Branch %s\n',
        branchname,
    )
    fm.write(b'node', b'# Node ID %s\n', hex(binnode))
    fm.plain(b'# Parent %s\n' % hex(base))
    if len(realparents) > 1:
        fm.plain(b'# Parent %s\n' % hex(realparents[1]))
    fm.data(
        parents=fm.formatlist(pycompat.maplist(hex, realparents), name=b'node')
    )

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # Plain output: stream labelled diff chunks straight through.
        for chunk, label in patch.diffui(
            repo, base, binnode, match, opts=diffopts
        ):
            fm.plain(chunk, label=label)
    else:
        # TODO: make it structured?
        fm.data(
            diff=b''.join(patch.diff(repo, base, binnode, match, opts=diffopts))
        )
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    for idx, rev in enumerate(revs, 1):
        if not dest.startswith(b'<'):
            # A real file name, not a pseudo destination like b'<unnamed>'.
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, repo[rev], fm, match, switch_parent, idx, diffopts)
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # Group revisions by the output file each one expands to, keeping the
    # first-seen order of file names.
    perfile = util.sortdict()  # filename: [(seqno, rev), ...]

    for seqno, rev in enumerate(revs, 1):
        name = makefilename(
            repo[rev], fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        perfile.setdefault(name, []).append((seqno, rev))

    for name in perfile:
        with formatter.maybereopen(basefm, name) as fm:
            repo.ui.note(b"%s\n" % name)
            for seqno, rev in perfile[name]:
                fm.startitem()
                _exportsingle(
                    repo, repo[rev], fm, match, switch_parent, seqno, diffopts
                )
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch every file touched by *revs* that *match* accepts (or all
    touched files when *match* is None)."""
    changed = {
        f
        for rev in revs
        for f in repo[rev].files()
        if not match or match(f)
    }
    filematcher = scmutil.matchfiles(repo, changed)
    scmutil.prefetchfiles(repo, [(rev, filematcher) for rev in revs])
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Writes an "HG changeset patch" for every revision in *revs*.

    Args:
      repo: the repository the revisions are exported from.
      revs: revisions to export, as revision numbers.
      basefm: formatter the patches are written to.
      fntemplate: optional template expanding to one patch file name per
        revision; when empty, all output goes to *basefm*.
      switch_parent: if True, diff against the second parent when it is not
        null. Default is False, which always diffs against p1.
      opts: diff options used when generating each patch.
      match: if given, only export changes to files this matcher accepts.

    Returns:
      Nothing.

    Side Effect:
      Patch data is emitted either to per-revision files named via
      *fntemplate*, or — when no template is given — to *basefm*.
    """
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    _prefetchchangedfiles(repo, revs, match)

    # Use the stream's name when it has one (a real file), else a placeholder.
    target = getattr(fp, 'name', b'<unnamed>')
    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
        _exportfile(repo, revs, fm, target, switch_parent, opts, match)
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(
        successors,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, successors), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parentnodes), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # Work on a copy and drop 'date': it was already printed above.
    metadata = marker.metadata().copy()
    metadata.pop(b'date', None)
    printable = pycompat.rapply(pycompat.maybebytestr, metadata)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(printable, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    matching = repo.revs(b'date(%s)', date)
    try:
        rev = matching.max()
    except ValueError:
        # Empty revset: nothing matched the date specification.
        raise error.InputError(_(b"revision matching date not found"))

    ui.status(
        _(b"found revision %d from %s\n")
        % (rev, dateutil.datestr(repo[rev].date()))
    )
    return b'%d' % rev
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule files matched by *match* for addition to the working context.

    Recurses into subrepositories listed in the working context's substate.
    Returns the list of files that could not be added (those reported bad by
    the matcher, rejected by wctx.add, or rejected by a subrepo's add).
    """
    bad = []

    # Record files the matcher reports as bad, then delegate to its handler.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # Audit added names for case collisions on case-insensitive systems.
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # Explicitly named files are always added; others only when they are
        # untracked and actually exist in the working directory (and we were
        # not told to restrict ourselves to explicit names).
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            # Without --subrepos, recurse with explicitonly=True so subrepos
            # only add files that were named explicitly.
            if opts.get('subrepos'):
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # Only report rejections for files the user named directly.
        bad.extend(f for f in rejected if f in match.files())
    return bad
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo* and its subrepositories under *serverpath* in *webconf*."""
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    # Recurse into every subrepo that ever appeared in .hgsub history.
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        changectx = repo[rev]
        for subpath in changectx.substate:
            changectx.sub(subpath).addwebdirpath(serverpath, webconf)
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Stop tracking files matched by *match*, without deleting them.

    Recurses into subrepositories. In interactive mode the user is prompted
    per file. Returns a (bad, forgot) pair: files that could not be
    forgotten, and files that were (or would be, under --dry-run) forgotten.
    """
    if dryrun and interactive:
        raise error.InputError(
            _(b"cannot specify both --dry-run and --interactive")
        )
    bad = []
    # Record files the matcher reports as bad, then delegate to its handler.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    # Candidates are all tracked files the matcher hits, regardless of state.
    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            # Re-qualify subrepo-relative paths with the subrepo prefix.
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # Warn about explicitly named files that are not tracked at all.
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # Iterate over a copy: the loop removes entries from 'forget'.
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        # Only report rejections for files the user named directly.
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """List the files of *ctx* matched by *m* through formatter *fm*.

    Each matched file is printed via *fmt* % uipathfn(f); size and flags are
    added when verbose or requested via the formatter's data hint. Recurses
    into subrepositories when *subrepos* is set or a subrepo path is matched.
    Returns 0 if at least one file was listed, 1 otherwise.
    """
    ret = 1

    # File contexts are only needed when size/flags will actually be shown.
    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not needsfctx:
        # Fast path. The speed-up comes from skipping the formatter, and batching
        # calls to ui.write.
        buf = []
        for f in ctx.matches(m):
            buf.append(fmt % uipathfn(f))
            if len(buf) > 100:
                ui.write(b''.join(buf))
                del buf[:]
            ret = 0
        if buf:
            ui.write(b''.join(buf))
    else:
        for f in ctx.matches(m):
            fm.startitem()
            fm.context(ctx=ctx)
            if needsfctx:
                fc = ctx[f]
                fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
            fm.data(path=f)
            fm.plain(fmt % uipathfn(f))
            ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                # A subrepo listing anything also makes our result "success".
                if (
                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
                    == 0
                ):
                    ret = 0
            except error.LookupError:
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return ret
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Implement ``hg remove``: stop tracking (and usually delete) files.

    Files matched by ``m`` are forgotten from the dirstate and, unless
    ``after`` (--after) or ``dryrun`` is set, unlinked from the working
    directory.  ``force`` removes even modified/added files.

    ``warnings`` is None at the outermost call; this call then owns the
    warning list and prints it at the end.  Recursive subrepo calls pass
    the shared list in, so only the outermost call prints (``warn`` flag).

    Returns 0 on success, 1 if any file could not be removed.
    """
    ret = 0
    # include clean files: with --after/--force they are removal candidates
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    # recurse into subrepositories first, accumulating their warnings
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            # f belongs to a subrepo if a substate path is a prefix of it
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        # skip anything already tracked, a directory, the repo root,
        # or something handled by the subrepo recursion above
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # pick the removal set according to --force/--after, warning about
    # files that each mode refuses to touch
    # NOTE: 'list' shadows the builtin here; kept as-is (historical name)
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    # announce removals (explicitly-named files are only shown when verbose)
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    # only the outermost (owning) call prints the accumulated warnings
    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2661
2661
2662
2662
2663 def _catfmtneedsdata(fm):
2663 def _catfmtneedsdata(fm):
2664 return not fm.datahint() or b'data' in fm.datahint()
2664 return not fm.datahint() or b'data' in fm.datahint()
2665
2665
2666
2666
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first.

    When ``decode`` is true the file contents are run through the repo's
    write filters (``wwritedata``) before being emitted."""

    # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
    # wasn't requested.
    data = b''
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', data)
    fm.data(path=path)
2684
2684
2685
2685
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Implement ``hg cat``: emit the contents of matched files at ``ctx``.

    Output goes through ``basefm`` unless ``fntemplate`` is given, in which
    case each file is written to a filename expanded from the template.
    Subrepositories listed in ``ctx.substate`` are recursed into.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # emit one file, either into the shared formatter or into a
        # per-file destination derived from fntemplate
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            # file not in the manifest: fall through to the generic walk
            pass

    # only prefetch contents when the formatter will actually use them
    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            # sub.cat returns nonzero on failure; success clears err
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2746
2746
2747
2747
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    ``commitfunc(ui, repo, message, matcher, opts)`` performs the actual
    commit; this wrapper resolves the date and message options, builds the
    matcher, and (when ``--addremove`` was given) runs addremove under a
    dirstateguard so a failure rolls the dirstate back.
    '''
    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get(b'addremove'):
        dsguard = dirstateguard.dirstateguard(repo, b'commit')
    # nullcontextmanager keeps the 'with' shape when no guard is needed
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

    return commitfunc(ui, repo, message, matcher, opts)
2771
2771
2772
2772
def samefile(f, ctx1, ctx2):
    """Report whether file ``f`` is identical in ``ctx1`` and ``ctx2``.

    "Identical" means either present in both changesets with equal
    contents and equal flags, or absent from both.
    """
    present1 = f in ctx1.manifest()
    present2 = f in ctx2.manifest()
    if not present1:
        # absent on one side: identical only if absent on the other too
        return not present2
    if not present2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2783
2783
2784
2784
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in matched working-copy changes.

    Creates a replacement commit on ``old``'s first parent that combines
    ``old`` with the working-directory changes selected by ``pats``/``opts``,
    reparents the working copy onto it, fixes up the dirstate for the
    amended files, and obsoletes/strips the old node via cleanupnodes.

    Returns the node of the new changeset, or ``old.node()`` unchanged when
    the amend would be a no-op.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolvecommitoptions(ui, opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        # working-copy files actually selected for amending
        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # NOTE(review): 'old.p2' is a bound method, so this condition is
            # always truthy -- presumably 'old.p2()' or a merge check was
            # intended; confirm whether pathcopies against a null p2 is a
            # harmless no-op here.
            if old.p2:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        # keep a pre-amend_source copy of extra for the no-op comparison below
        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)
        ms.reset()

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, repo.nullid)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in the
        # amend to "normal" in the dirstate. We need to use "normallookup" since
        # the files may have changed since the command started; using "normal"
        # would mark them as clean but with uncommitted contents.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normallookup(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        # obsolete/strip the old node, optionally attaching a --note
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        return newid
3014
3014
3015
3015
def commiteditor(repo, ctx, subs, editform=b''):
    """Return the commit message for ``ctx``, prompting only when empty.

    A changeset that already carries a description is used as-is.
    Otherwise the user is asked via :func:`commitforceeditor`, with
    unchanged-message detection enabled so an untouched template aborts.
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(
        repo,
        ctx,
        subs,
        editform=editform,
        unchangedmessagedetection=True,
    )
3022
3022
3023
3023
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Open the user's editor on a commit-message template and return the text.

    The template comes from the most specific matching ``committemplate``
    config entry for ``editform`` (falling back through dotted prefixes to
    ``changeset``), or from :func:`buildcommittext` when none is configured.
    ``finishdesc`` may post-process the edited text.

    Raises InputError when the resulting message is empty, or -- with
    ``unchangedmessagedetection`` -- when the template was left untouched.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # try committemplate.<editform>, then each shorter dotted prefix,
    # always ending with the 'changeset' catch-all
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop HG: helper lines injected into the template
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.InputError(_(b"empty commit message"))
    # compare against the raw editor output, before HG: stripping
    if unchangedmessagedetection and editortext == templatetext:
        raise error.InputError(_(b"commit message unchanged"))

    return text
3087
3087
3088
3088
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the ``[committemplate]`` template ``ref`` for ``ctx``.

    Entries from the ``committemplate`` config section are made available
    to the templater (unquoted) so templates can reference each other.
    Returns the rendered bytes captured from the ui buffer.
    """
    ui = repo.ui
    spec = formatter.reference_templatespec(ref)
    templater_obj = logcmdutil.changesettemplater(ui, repo, spec)
    # Expose every committemplate config entry as a template fragment.
    fragments = (
        (name, templater.unquotestring(value))
        for name, value in ui.configitems(b'committemplate')
    )
    templater_obj.t.cache.update(fragments)

    if not extramsg:
        extramsg = b''  # ensure that extramsg is string

    ui.pushbuffer()
    templater_obj.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3104
3104
3105
3105
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` (bytes) with ``HG: ``.

    Empty lines are dropped entirely rather than prefixed.
    """
    nonempty = (line for line in msg.split(b"\n") if line)
    return b"\n".join(b"HG: " + line for line in nonempty)
3108
3108
3109
3109
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ``ctx``.

    The text starts with any existing description, followed by ``HG:``
    comment lines describing user, branch/bookmark state, subrepos and
    the changed files.  Returns bytes joined with newlines.
    """
    lines = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        lines.append(ctx.description())
    lines.append(b"")
    lines.append(b"")  # Empty line between message and comments.
    lines.append(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    lines.append(hgprefix(extramsg))
    lines.append(b"HG: --")
    lines.append(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_(b"branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_(b"subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_(b"added %s") % f) for f in added)
    lines.extend(hgprefix(_(b"changed %s") % f) for f in modified)
    lines.extend(hgprefix(_(b"removed %s") % f) for f in removed)
    if not added and not modified and not removed:
        lines.append(hgprefix(_(b"no files changed")))
    lines.append(b"")

    return b"\n".join(lines)
3143
3143
3144
3144
def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
    """Report user-facing status messages after committing ``node``.

    Warns when the commit already existed (``tip`` unchanged), announces
    ``created new head`` when appropriate, notes reopened closed branch
    heads, and echoes the committed changeset in verbose/debug mode.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if tip is not None and repo.changelog.tip() == tip:
        # avoid reporting something like "committed new head" when
        # recommitting old changesets, and issue a helpful warning
        # for most instances
        repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
    elif (
        # NB: the `any(...)` must stay short-circuited behind `bheads`,
        # which may be None.
        not opts.get(b'amend')
        and bheads
        and node not in bheads
        and not any(
            p.node() in bheads and p.branch() == branch for p in parents
        )
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get(b'close_branch'):
        for parent in parents:
            if parent.closesbranch() and parent.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % parent.rev()
                )

    if repo.ui.debugflag:
        committed = _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        repo.ui.write(committed)
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3207
3207
3208
3208
def postcommitstatus(repo, pats, opts):
    """Return the working-copy status for files matched by ``pats``/``opts``."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3211
3211
3212
3212
def revert(ui, repo, ctx, *pats, **opts):
    """Revert matched files in the working directory to their state in ``ctx``.

    Classifies every matched file (modified, added, removed, deleted,
    unknown, clean — each relative to both the target revision and the
    dirstate), builds a dispatch table of revert actions with per-set
    backup strategies, prints/applies them via ``_performrevert``, and
    finally recurses into matched subrepositories.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get(b'interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abspath in wctx.walk(matcher):
                names[abspath] = m.exact(abspath)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already collected or that are
                # (or contain) subrepos / already-known files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + b'/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))

            for abspath in ctx.walk(matchmod.badmatch(m, badfn)):
                if abspath not in names:
                    names[abspath] = m.exact(abspath)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(
                node1=node, match=m, unknown=True, ignored=True, clean=True
            )
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abspath in kind:
                    names[abspath] = m.exact(abspath)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded  # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != repo.nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == b'r':
                dsremoved.add(src)
                names[src] = True

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abspath in dsadded:
            if repo.dirstate[abspath] != b'a':
                added.add(abspath)
        dsadded -= added

        for abspath in deladded:
            if repo.dirstate[abspath] == b'a':
                dsadded.add(abspath)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abspath in removed:
            target = repo.wjoin(abspath)
            if os.path.lexists(target):
                removunk.add(abspath)
        removed -= removunk

        dsremovunk = set()
        for abspath in dsremoved:
            target = repo.wjoin(abspath)
            if os.path.lexists(target):
                dsremovunk.add(abspath)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {
            b'revert': ([], _(b'reverting %s\n')),
            b'add': ([], _(b'adding %s\n')),
            b'remove': ([], _(b'removing %s\n')),
            b'drop': ([], _(b'removing %s\n')),
            b'forget': ([], _(b'forgetting %s\n')),
            b'undelete': ([], _(b'undeleting %s\n')),
            b'noop': (None, _(b'no changes needed to %s\n')),
            b'unknown': (None, _(b'file not managed: %s\n')),
        }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3  # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0  # never do backup
        if opts.get(b'no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions[b'remove']
        if not opts.get(b'no_backup'):
            backupanddel = actions[b'drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup
            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions[b'revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions[b'revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions[b'revert'], dsmodifiedbackup),
            # Added since target
            (added, actions[b'remove'], discard),
            # Added in working directory
            (dsadded, actions[b'forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions[b'drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions[b'add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions[b'add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions[b'undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions[b'undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions[b'noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions[b'unknown'], discard),
        )

        for abspath, exact in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abspath)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abspath not in table:
                    continue
                if xlist is not None:
                    xlist.append(abspath)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abspath)
                        elif backup <= dobackup or wctx[abspath].cmp(
                            ctx[abspath]
                        ):
                            absbakname = scmutil.backuppath(ui, repo, abspath)
                            bakname = os.path.relpath(
                                absbakname, start=repo.root
                            )
                            ui.note(
                                _(b'saving current version of %s as %s\n')
                                % (uipathfn(abspath), uipathfn(bakname))
                            )
                            if not opts.get(b'dry_run'):
                                if interactive:
                                    util.copyfile(target, absbakname)
                                else:
                                    util.rename(target, absbakname)
                    if opts.get(b'dry_run'):
                        if ui.verbose or not exact:
                            ui.status(msg % uipathfn(abspath))
                elif exact:
                    ui.warn(msg % uipathfn(abspath))
                break

        if not opts.get(b'dry_run'):
            needdata = (b'revert', b'add', b'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles(
                repo, [f for sublist in oplist for f in sublist]
            )
            prefetch(
                repo,
                [(ctx.rev(), matchfiles)],
            )
            match = scmutil.match(repo[None], pats)
            _performrevert(
                repo,
                ctx,
                names,
                uipathfn,
                actions,
                match,
                interactive,
                tobackup,
            )

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(
                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
                    )
                except KeyError:
                    raise error.Abort(
                        b"subrepository '%s' does not exist in %s!"
                        % (sub, short(ctx.node()))
                    )
3523
3523
3524
3524
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """Perform all the actions computed for a revert.

    This is kept as an independent function so that extensions can hook in
    and react to the imminent revert.

    Make sure you have the working directory locked when calling this
    function.
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    excluded_files = []

    def checkout(f):
        # write the file content from the target context into the wdir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            pass
        repo.dirstate.remove(f)

    def prntstatusmsg(action, f):
        # report the action unless the file was matched exactly and we
        # are not verbose
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status to
        # report the file as clean. We have to use normallookup for merges
        # to avoid losing information about merged/dirty files.
        if p2 != repo.nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for the changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply the selected changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == repo.nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3723
3723
3724
3724
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#     (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#     - (sourceurl, sourcebranch, sourcepeer, incoming)
#     - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3742
3742
3743
3743
def checkunfinished(repo, commit=False, skipmerge=False):
    """Look for an unfinished multistep operation, like graft, and abort
    if one is found. It's probably good to check this right before
    bailifchanged().
    """
    # Check for non-clearable states first, so things like rebase will take
    # precedence over update.
    for state in statemod._unfinishedstates:
        if (
            state._clearable
            or (commit and state._allowcommit)
            or state._reportonly
        ):
            continue
        if state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    # Then look at the clearable states.
    for state in statemod._unfinishedstates:
        if (
            not state._clearable
            or (commit and state._allowcommit)
            or (state._opname == b'merge' and skipmerge)
            or state._reportonly
        ):
            continue
        if state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())
3771
3771
3772
3772
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable.
    """
    # Non-clearable unfinished states abort before anything is cleared.
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    # Clearable states are wiped by removing their state file.
    for state in statemod._unfinishedstates:
        if state._opname == b'merge' or state._reportonly:
            continue
        if state._clearable and state.isunfinished(repo):
            util.unlink(repo.vfs.join(state._fname))
3788
3788
3789
3789
def getunfinishedstate(repo):
    """Check for unfinished operations and return the statecheck object
    for the first one found, or None when nothing is in progress."""
    return next(
        (
            state
            for state in statemod._unfinishedstates
            if state.isunfinished(repo)
        ),
        None,
    )
3797
3797
3798
3798
def howtocontinue(repo):
    """Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates is scanned for an unfinished operation and
    the corresponding message to finish it is generated if the operation
    supports a 'continue' method.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    """
    contmsg = _(b"continue: %s")
    for state in statemod._unfinishedstates:
        if state._continueflag and state.isunfinished(repo):
            return contmsg % state.continuemsg(), True
    # No continuable operation; a dirty working directory still suggests
    # a commit as the next step.
    if repo[None].dirty(missing=True, merge=False, branch=False):
        return contmsg % _(b"hg commit"), False
    return None, None
3819
3819
3820
3820
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve.

    If there's an unfinished operation that supports the continue flag,
    the message is reported through repo.ui.warn; otherwise through
    repo.ui.note.
    """
    msg, warning = howtocontinue(repo)
    if msg is not None:
        reporter = repo.ui.warn if warning else repo.ui.note
        reporter(b"%s\n" % msg)
3835
3835
3836
3836
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    """
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.StateError(_(b'no %s in progress') % task, hint=hint)
3851
3851
3852
3852
def abortgraft(ui, repo, graftstate):
    """Abort an interrupted graft and roll back to the pre-graft state."""
    if not graftstate.exists():
        raise error.StateError(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # an old graft state file lacks the data required to abort the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which the graft operation was started
    if newnodes:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip the newly created nodes or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # check that none of the new nodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # check that no new nodes were created on top of the grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

        if cleanup:
            with repo.wlock(), repo.lock():
                mergemod.clean_update(startctx)
                # strip the nodes created by the aborted graft
                strippoints = [
                    c.node() for c in repo.set(b"roots(%ld)", newnodes)
                ]
                repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3915
3915
3916
3916
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """Read the graft state file and return a dict of the data stored in it."""
    try:
        return graftstate.read()
    except error.CorruptedState:
        # fall back to the legacy format: one node per line
        nodes = repo.vfs.read(b'graftstate').splitlines()
        return {b'nodes': nodes}
3925
3925
3926
3926
def hgabortgraft(ui, repo):
    """Abort logic for aborting graft using 'hg abort'."""
    with repo.wlock():
        graftstate = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, graftstate)
@@ -1,286 +1,286 b''
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import nullrev
11 from .node import nullrev
12 from . import (
12 from . import (
13 error,
13 error,
14 revlog,
14 revlog,
15 )
15 )
16 from .interfaces import (
16 from .interfaces import (
17 repository,
17 repository,
18 util as interfaceutil,
18 util as interfaceutil,
19 )
19 )
20 from .utils import storageutil
20 from .utils import storageutil
21 from .revlogutils import (
21 from .revlogutils import (
22 constants as revlog_constants,
22 constants as revlog_constants,
23 )
23 )
24
24
25
25
26 @interfaceutil.implementer(repository.ifilestorage)
26 @interfaceutil.implementer(repository.ifilestorage)
27 class filelog(object):
27 class filelog(object):
28 def __init__(self, opener, path):
28 def __init__(self, opener, path):
29 self._revlog = revlog.revlog(
29 self._revlog = revlog.revlog(
30 opener,
30 opener,
31 # XXX should use the unencoded path
31 # XXX should use the unencoded path
32 target=(revlog_constants.KIND_FILELOG, path),
32 target=(revlog_constants.KIND_FILELOG, path),
33 indexfile=b'/'.join((b'data', path + b'.i')),
33 radix=b'/'.join((b'data', path)),
34 censorable=True,
34 censorable=True,
35 )
35 )
36 # Full name of the user visible file, relative to the repository root.
36 # Full name of the user visible file, relative to the repository root.
37 # Used by LFS.
37 # Used by LFS.
38 self._revlog.filename = path
38 self._revlog.filename = path
39 self.nullid = self._revlog.nullid
39 self.nullid = self._revlog.nullid
40
40
41 def __len__(self):
41 def __len__(self):
42 return len(self._revlog)
42 return len(self._revlog)
43
43
44 def __iter__(self):
44 def __iter__(self):
45 return self._revlog.__iter__()
45 return self._revlog.__iter__()
46
46
47 def hasnode(self, node):
47 def hasnode(self, node):
48 if node in (self.nullid, nullrev):
48 if node in (self.nullid, nullrev):
49 return False
49 return False
50
50
51 try:
51 try:
52 self._revlog.rev(node)
52 self._revlog.rev(node)
53 return True
53 return True
54 except (TypeError, ValueError, IndexError, error.LookupError):
54 except (TypeError, ValueError, IndexError, error.LookupError):
55 return False
55 return False
56
56
57 def revs(self, start=0, stop=None):
57 def revs(self, start=0, stop=None):
58 return self._revlog.revs(start=start, stop=stop)
58 return self._revlog.revs(start=start, stop=stop)
59
59
60 def parents(self, node):
60 def parents(self, node):
61 return self._revlog.parents(node)
61 return self._revlog.parents(node)
62
62
63 def parentrevs(self, rev):
63 def parentrevs(self, rev):
64 return self._revlog.parentrevs(rev)
64 return self._revlog.parentrevs(rev)
65
65
66 def rev(self, node):
66 def rev(self, node):
67 return self._revlog.rev(node)
67 return self._revlog.rev(node)
68
68
69 def node(self, rev):
69 def node(self, rev):
70 return self._revlog.node(rev)
70 return self._revlog.node(rev)
71
71
72 def lookup(self, node):
72 def lookup(self, node):
73 return storageutil.fileidlookup(
73 return storageutil.fileidlookup(
74 self._revlog, node, self._revlog._indexfile
74 self._revlog, node, self._revlog._indexfile
75 )
75 )
76
76
77 def linkrev(self, rev):
77 def linkrev(self, rev):
78 return self._revlog.linkrev(rev)
78 return self._revlog.linkrev(rev)
79
79
80 def commonancestorsheads(self, node1, node2):
80 def commonancestorsheads(self, node1, node2):
81 return self._revlog.commonancestorsheads(node1, node2)
81 return self._revlog.commonancestorsheads(node1, node2)
82
82
83 # Used by dagop.blockdescendants().
83 # Used by dagop.blockdescendants().
84 def descendants(self, revs):
84 def descendants(self, revs):
85 return self._revlog.descendants(revs)
85 return self._revlog.descendants(revs)
86
86
87 def heads(self, start=None, stop=None):
87 def heads(self, start=None, stop=None):
88 return self._revlog.heads(start, stop)
88 return self._revlog.heads(start, stop)
89
89
90 # Used by hgweb, children extension.
90 # Used by hgweb, children extension.
91 def children(self, node):
91 def children(self, node):
92 return self._revlog.children(node)
92 return self._revlog.children(node)
93
93
94 def iscensored(self, rev):
94 def iscensored(self, rev):
95 return self._revlog.iscensored(rev)
95 return self._revlog.iscensored(rev)
96
96
97 def revision(self, node, _df=None, raw=False):
97 def revision(self, node, _df=None, raw=False):
98 return self._revlog.revision(node, _df=_df, raw=raw)
98 return self._revlog.revision(node, _df=_df, raw=raw)
99
99
100 def rawdata(self, node, _df=None):
100 def rawdata(self, node, _df=None):
101 return self._revlog.rawdata(node, _df=_df)
101 return self._revlog.rawdata(node, _df=_df)
102
102
103 def emitrevisions(
103 def emitrevisions(
104 self,
104 self,
105 nodes,
105 nodes,
106 nodesorder=None,
106 nodesorder=None,
107 revisiondata=False,
107 revisiondata=False,
108 assumehaveparentrevisions=False,
108 assumehaveparentrevisions=False,
109 deltamode=repository.CG_DELTAMODE_STD,
109 deltamode=repository.CG_DELTAMODE_STD,
110 sidedata_helpers=None,
110 sidedata_helpers=None,
111 ):
111 ):
112 return self._revlog.emitrevisions(
112 return self._revlog.emitrevisions(
113 nodes,
113 nodes,
114 nodesorder=nodesorder,
114 nodesorder=nodesorder,
115 revisiondata=revisiondata,
115 revisiondata=revisiondata,
116 assumehaveparentrevisions=assumehaveparentrevisions,
116 assumehaveparentrevisions=assumehaveparentrevisions,
117 deltamode=deltamode,
117 deltamode=deltamode,
118 sidedata_helpers=sidedata_helpers,
118 sidedata_helpers=sidedata_helpers,
119 )
119 )
120
120
121 def addrevision(
121 def addrevision(
122 self,
122 self,
123 revisiondata,
123 revisiondata,
124 transaction,
124 transaction,
125 linkrev,
125 linkrev,
126 p1,
126 p1,
127 p2,
127 p2,
128 node=None,
128 node=None,
129 flags=revlog.REVIDX_DEFAULT_FLAGS,
129 flags=revlog.REVIDX_DEFAULT_FLAGS,
130 cachedelta=None,
130 cachedelta=None,
131 ):
131 ):
132 return self._revlog.addrevision(
132 return self._revlog.addrevision(
133 revisiondata,
133 revisiondata,
134 transaction,
134 transaction,
135 linkrev,
135 linkrev,
136 p1,
136 p1,
137 p2,
137 p2,
138 node=node,
138 node=node,
139 flags=flags,
139 flags=flags,
140 cachedelta=cachedelta,
140 cachedelta=cachedelta,
141 )
141 )
142
142
143 def addgroup(
143 def addgroup(
144 self,
144 self,
145 deltas,
145 deltas,
146 linkmapper,
146 linkmapper,
147 transaction,
147 transaction,
148 addrevisioncb=None,
148 addrevisioncb=None,
149 duplicaterevisioncb=None,
149 duplicaterevisioncb=None,
150 maybemissingparents=False,
150 maybemissingparents=False,
151 ):
151 ):
152 if maybemissingparents:
152 if maybemissingparents:
153 raise error.Abort(
153 raise error.Abort(
154 _(
154 _(
155 b'revlog storage does not support missing '
155 b'revlog storage does not support missing '
156 b'parents write mode'
156 b'parents write mode'
157 )
157 )
158 )
158 )
159
159
160 return self._revlog.addgroup(
160 return self._revlog.addgroup(
161 deltas,
161 deltas,
162 linkmapper,
162 linkmapper,
163 transaction,
163 transaction,
164 addrevisioncb=addrevisioncb,
164 addrevisioncb=addrevisioncb,
165 duplicaterevisioncb=duplicaterevisioncb,
165 duplicaterevisioncb=duplicaterevisioncb,
166 )
166 )
167
167
168 def getstrippoint(self, minlink):
168 def getstrippoint(self, minlink):
169 return self._revlog.getstrippoint(minlink)
169 return self._revlog.getstrippoint(minlink)
170
170
171 def strip(self, minlink, transaction):
171 def strip(self, minlink, transaction):
172 return self._revlog.strip(minlink, transaction)
172 return self._revlog.strip(minlink, transaction)
173
173
174 def censorrevision(self, tr, node, tombstone=b''):
174 def censorrevision(self, tr, node, tombstone=b''):
175 return self._revlog.censorrevision(tr, node, tombstone=tombstone)
175 return self._revlog.censorrevision(tr, node, tombstone=tombstone)
176
176
177 def files(self):
177 def files(self):
178 return self._revlog.files()
178 return self._revlog.files()
179
179
180 def read(self, node):
180 def read(self, node):
181 return storageutil.filtermetadata(self.revision(node))
181 return storageutil.filtermetadata(self.revision(node))
182
182
183 def add(self, text, meta, transaction, link, p1=None, p2=None):
183 def add(self, text, meta, transaction, link, p1=None, p2=None):
184 if meta or text.startswith(b'\1\n'):
184 if meta or text.startswith(b'\1\n'):
185 text = storageutil.packmeta(meta, text)
185 text = storageutil.packmeta(meta, text)
186 rev = self.addrevision(text, transaction, link, p1, p2)
186 rev = self.addrevision(text, transaction, link, p1, p2)
187 return self.node(rev)
187 return self.node(rev)
188
188
189 def renamed(self, node):
189 def renamed(self, node):
190 return storageutil.filerevisioncopied(self, node)
190 return storageutil.filerevisioncopied(self, node)
191
191
192 def size(self, rev):
192 def size(self, rev):
193 """return the size of a given revision"""
193 """return the size of a given revision"""
194
194
195 # for revisions with renames, we have to go the slow way
195 # for revisions with renames, we have to go the slow way
196 node = self.node(rev)
196 node = self.node(rev)
197 if self.renamed(node):
197 if self.renamed(node):
198 return len(self.read(node))
198 return len(self.read(node))
199 if self.iscensored(rev):
199 if self.iscensored(rev):
200 return 0
200 return 0
201
201
202 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
202 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
203 return self._revlog.size(rev)
203 return self._revlog.size(rev)
204
204
205 def cmp(self, node, text):
205 def cmp(self, node, text):
206 """compare text with a given file revision
206 """compare text with a given file revision
207
207
208 returns True if text is different than what is stored.
208 returns True if text is different than what is stored.
209 """
209 """
210 return not storageutil.filedataequivalent(self, node, text)
210 return not storageutil.filedataequivalent(self, node, text)
211
211
212 def verifyintegrity(self, state):
212 def verifyintegrity(self, state):
213 return self._revlog.verifyintegrity(state)
213 return self._revlog.verifyintegrity(state)
214
214
215 def storageinfo(
215 def storageinfo(
216 self,
216 self,
217 exclusivefiles=False,
217 exclusivefiles=False,
218 sharedfiles=False,
218 sharedfiles=False,
219 revisionscount=False,
219 revisionscount=False,
220 trackedsize=False,
220 trackedsize=False,
221 storedsize=False,
221 storedsize=False,
222 ):
222 ):
223 return self._revlog.storageinfo(
223 return self._revlog.storageinfo(
224 exclusivefiles=exclusivefiles,
224 exclusivefiles=exclusivefiles,
225 sharedfiles=sharedfiles,
225 sharedfiles=sharedfiles,
226 revisionscount=revisionscount,
226 revisionscount=revisionscount,
227 trackedsize=trackedsize,
227 trackedsize=trackedsize,
228 storedsize=storedsize,
228 storedsize=storedsize,
229 )
229 )
230
230
231 # Used by repo upgrade.
231 # Used by repo upgrade.
232 def clone(self, tr, destrevlog, **kwargs):
232 def clone(self, tr, destrevlog, **kwargs):
233 if not isinstance(destrevlog, filelog):
233 if not isinstance(destrevlog, filelog):
234 raise error.ProgrammingError(b'expected filelog to clone()')
234 raise error.ProgrammingError(b'expected filelog to clone()')
235
235
236 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
236 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
237
237
238
238
239 class narrowfilelog(filelog):
239 class narrowfilelog(filelog):
240 """Filelog variation to be used with narrow stores."""
240 """Filelog variation to be used with narrow stores."""
241
241
242 def __init__(self, opener, path, narrowmatch):
242 def __init__(self, opener, path, narrowmatch):
243 super(narrowfilelog, self).__init__(opener, path)
243 super(narrowfilelog, self).__init__(opener, path)
244 self._narrowmatch = narrowmatch
244 self._narrowmatch = narrowmatch
245
245
246 def renamed(self, node):
246 def renamed(self, node):
247 res = super(narrowfilelog, self).renamed(node)
247 res = super(narrowfilelog, self).renamed(node)
248
248
249 # Renames that come from outside the narrowspec are problematic
249 # Renames that come from outside the narrowspec are problematic
250 # because we may lack the base text for the rename. This can result
250 # because we may lack the base text for the rename. This can result
251 # in code attempting to walk the ancestry or compute a diff
251 # in code attempting to walk the ancestry or compute a diff
252 # encountering a missing revision. We address this by silently
252 # encountering a missing revision. We address this by silently
253 # removing rename metadata if the source file is outside the
253 # removing rename metadata if the source file is outside the
254 # narrow spec.
254 # narrow spec.
255 #
255 #
256 # A better solution would be to see if the base revision is available,
256 # A better solution would be to see if the base revision is available,
257 # rather than assuming it isn't.
257 # rather than assuming it isn't.
258 #
258 #
259 # An even better solution would be to teach all consumers of rename
259 # An even better solution would be to teach all consumers of rename
260 # metadata that the base revision may not be available.
260 # metadata that the base revision may not be available.
261 #
261 #
262 # TODO consider better ways of doing this.
262 # TODO consider better ways of doing this.
263 if res and not self._narrowmatch(res[0]):
263 if res and not self._narrowmatch(res[0]):
264 return None
264 return None
265
265
266 return res
266 return res
267
267
268 def size(self, rev):
268 def size(self, rev):
269 # Because we have a custom renamed() that may lie, we need to call
269 # Because we have a custom renamed() that may lie, we need to call
270 # the base renamed() to report accurate results.
270 # the base renamed() to report accurate results.
271 node = self.node(rev)
271 node = self.node(rev)
272 if super(narrowfilelog, self).renamed(node):
272 if super(narrowfilelog, self).renamed(node):
273 return len(self.read(node))
273 return len(self.read(node))
274 else:
274 else:
275 return super(narrowfilelog, self).size(rev)
275 return super(narrowfilelog, self).size(rev)
276
276
277 def cmp(self, node, text):
277 def cmp(self, node, text):
278 # We don't call `super` because narrow parents can be buggy in case of a
278 # We don't call `super` because narrow parents can be buggy in case of a
279 # ambiguous dirstate. Always take the slow path until there is a better
279 # ambiguous dirstate. Always take the slow path until there is a better
280 # fix, see issue6150.
280 # fix, see issue6150.
281
281
282 # Censored files compare against the empty file.
282 # Censored files compare against the empty file.
283 if self.iscensored(self.rev(node)):
283 if self.iscensored(self.rev(node)):
284 return text != b''
284 return text != b''
285
285
286 return self.read(node) != text
286 return self.read(node) != text
@@ -1,2376 +1,2374 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullrev,
19 nullrev,
20 )
20 )
21 from .pycompat import getattr
21 from .pycompat import getattr
22 from . import (
22 from . import (
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 pathutil,
27 pathutil,
28 policy,
28 policy,
29 pycompat,
29 pycompat,
30 revlog,
30 revlog,
31 util,
31 util,
32 )
32 )
33 from .interfaces import (
33 from .interfaces import (
34 repository,
34 repository,
35 util as interfaceutil,
35 util as interfaceutil,
36 )
36 )
37 from .revlogutils import (
37 from .revlogutils import (
38 constants as revlog_constants,
38 constants as revlog_constants,
39 )
39 )
40
40
41 parsers = policy.importmod('parsers')
41 parsers = policy.importmod('parsers')
42 propertycache = util.propertycache
42 propertycache = util.propertycache
43
43
44 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
44 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
45 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
45 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
46
46
47
47
48 def _parse(nodelen, data):
48 def _parse(nodelen, data):
49 # This method does a little bit of excessive-looking
49 # This method does a little bit of excessive-looking
50 # precondition checking. This is so that the behavior of this
50 # precondition checking. This is so that the behavior of this
51 # class exactly matches its C counterpart to try and help
51 # class exactly matches its C counterpart to try and help
52 # prevent surprise breakage for anyone that develops against
52 # prevent surprise breakage for anyone that develops against
53 # the pure version.
53 # the pure version.
54 if data and data[-1:] != b'\n':
54 if data and data[-1:] != b'\n':
55 raise ValueError(b'Manifest did not end in a newline.')
55 raise ValueError(b'Manifest did not end in a newline.')
56 prev = None
56 prev = None
57 for l in data.splitlines():
57 for l in data.splitlines():
58 if prev is not None and prev > l:
58 if prev is not None and prev > l:
59 raise ValueError(b'Manifest lines not in sorted order.')
59 raise ValueError(b'Manifest lines not in sorted order.')
60 prev = l
60 prev = l
61 f, n = l.split(b'\0')
61 f, n = l.split(b'\0')
62 nl = len(n)
62 nl = len(n)
63 flags = n[-1:]
63 flags = n[-1:]
64 if flags in _manifestflags:
64 if flags in _manifestflags:
65 n = n[:-1]
65 n = n[:-1]
66 nl -= 1
66 nl -= 1
67 else:
67 else:
68 flags = b''
68 flags = b''
69 if nl != 2 * nodelen:
69 if nl != 2 * nodelen:
70 raise ValueError(b'Invalid manifest line')
70 raise ValueError(b'Invalid manifest line')
71
71
72 yield f, bin(n), flags
72 yield f, bin(n), flags
73
73
74
74
75 def _text(it):
75 def _text(it):
76 files = []
76 files = []
77 lines = []
77 lines = []
78 for f, n, fl in it:
78 for f, n, fl in it:
79 files.append(f)
79 files.append(f)
80 # if this is changed to support newlines in filenames,
80 # if this is changed to support newlines in filenames,
81 # be sure to check the templates/ dir again (especially *-raw.tmpl)
81 # be sure to check the templates/ dir again (especially *-raw.tmpl)
82 lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
82 lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
83
83
84 _checkforbidden(files)
84 _checkforbidden(files)
85 return b''.join(lines)
85 return b''.join(lines)
86
86
87
87
88 class lazymanifestiter(object):
88 class lazymanifestiter(object):
89 def __init__(self, lm):
89 def __init__(self, lm):
90 self.pos = 0
90 self.pos = 0
91 self.lm = lm
91 self.lm = lm
92
92
93 def __iter__(self):
93 def __iter__(self):
94 return self
94 return self
95
95
96 def next(self):
96 def next(self):
97 try:
97 try:
98 data, pos = self.lm._get(self.pos)
98 data, pos = self.lm._get(self.pos)
99 except IndexError:
99 except IndexError:
100 raise StopIteration
100 raise StopIteration
101 if pos == -1:
101 if pos == -1:
102 self.pos += 1
102 self.pos += 1
103 return data[0]
103 return data[0]
104 self.pos += 1
104 self.pos += 1
105 zeropos = data.find(b'\x00', pos)
105 zeropos = data.find(b'\x00', pos)
106 return data[pos:zeropos]
106 return data[pos:zeropos]
107
107
108 __next__ = next
108 __next__ = next
109
109
110
110
111 class lazymanifestiterentries(object):
111 class lazymanifestiterentries(object):
112 def __init__(self, lm):
112 def __init__(self, lm):
113 self.lm = lm
113 self.lm = lm
114 self.pos = 0
114 self.pos = 0
115
115
116 def __iter__(self):
116 def __iter__(self):
117 return self
117 return self
118
118
119 def next(self):
119 def next(self):
120 try:
120 try:
121 data, pos = self.lm._get(self.pos)
121 data, pos = self.lm._get(self.pos)
122 except IndexError:
122 except IndexError:
123 raise StopIteration
123 raise StopIteration
124 if pos == -1:
124 if pos == -1:
125 self.pos += 1
125 self.pos += 1
126 return data
126 return data
127 zeropos = data.find(b'\x00', pos)
127 zeropos = data.find(b'\x00', pos)
128 nlpos = data.find(b'\n', pos)
128 nlpos = data.find(b'\n', pos)
129 if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
129 if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
130 raise error.StorageError(b'Invalid manifest line')
130 raise error.StorageError(b'Invalid manifest line')
131 flags = data[nlpos - 1 : nlpos]
131 flags = data[nlpos - 1 : nlpos]
132 if flags in _manifestflags:
132 if flags in _manifestflags:
133 hlen = nlpos - zeropos - 2
133 hlen = nlpos - zeropos - 2
134 else:
134 else:
135 hlen = nlpos - zeropos - 1
135 hlen = nlpos - zeropos - 1
136 flags = b''
136 flags = b''
137 if hlen != 2 * self.lm._nodelen:
137 if hlen != 2 * self.lm._nodelen:
138 raise error.StorageError(b'Invalid manifest line')
138 raise error.StorageError(b'Invalid manifest line')
139 hashval = unhexlify(
139 hashval = unhexlify(
140 data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
140 data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
141 )
141 )
142 self.pos += 1
142 self.pos += 1
143 return (data[pos:zeropos], hashval, flags)
143 return (data[pos:zeropos], hashval, flags)
144
144
145 __next__ = next
145 __next__ = next
146
146
147
147
148 def unhexlify(data, extra, pos, length):
148 def unhexlify(data, extra, pos, length):
149 s = bin(data[pos : pos + length])
149 s = bin(data[pos : pos + length])
150 if extra:
150 if extra:
151 s += chr(extra & 0xFF)
151 s += chr(extra & 0xFF)
152 return s
152 return s
153
153
154
154
155 def _cmp(a, b):
155 def _cmp(a, b):
156 return (a > b) - (a < b)
156 return (a > b) - (a < b)
157
157
158
158
159 _manifestflags = {b'', b'l', b't', b'x'}
159 _manifestflags = {b'', b'l', b't', b'x'}
160
160
161
161
162 class _lazymanifest(object):
162 class _lazymanifest(object):
163 """A pure python manifest backed by a byte string. It is supplimented with
163 """A pure python manifest backed by a byte string. It is supplimented with
164 internal lists as it is modified, until it is compacted back to a pure byte
164 internal lists as it is modified, until it is compacted back to a pure byte
165 string.
165 string.
166
166
167 ``data`` is the initial manifest data.
167 ``data`` is the initial manifest data.
168
168
169 ``positions`` is a list of offsets, one per manifest entry. Positive
169 ``positions`` is a list of offsets, one per manifest entry. Positive
170 values are offsets into ``data``, negative values are offsets into the
170 values are offsets into ``data``, negative values are offsets into the
171 ``extradata`` list. When an entry is removed, its entry is dropped from
171 ``extradata`` list. When an entry is removed, its entry is dropped from
172 ``positions``. The values are encoded such that when walking the list and
172 ``positions``. The values are encoded such that when walking the list and
173 indexing into ``data`` or ``extradata`` as appropriate, the entries are
173 indexing into ``data`` or ``extradata`` as appropriate, the entries are
174 sorted by filename.
174 sorted by filename.
175
175
176 ``extradata`` is a list of (key, hash, flags) for entries that were added or
176 ``extradata`` is a list of (key, hash, flags) for entries that were added or
177 modified since the manifest was created or compacted.
177 modified since the manifest was created or compacted.
178 """
178 """
179
179
180 def __init__(
180 def __init__(
181 self,
181 self,
182 nodelen,
182 nodelen,
183 data,
183 data,
184 positions=None,
184 positions=None,
185 extrainfo=None,
185 extrainfo=None,
186 extradata=None,
186 extradata=None,
187 hasremovals=False,
187 hasremovals=False,
188 ):
188 ):
189 self._nodelen = nodelen
189 self._nodelen = nodelen
190 if positions is None:
190 if positions is None:
191 self.positions = self.findlines(data)
191 self.positions = self.findlines(data)
192 self.extrainfo = [0] * len(self.positions)
192 self.extrainfo = [0] * len(self.positions)
193 self.data = data
193 self.data = data
194 self.extradata = []
194 self.extradata = []
195 self.hasremovals = False
195 self.hasremovals = False
196 else:
196 else:
197 self.positions = positions[:]
197 self.positions = positions[:]
198 self.extrainfo = extrainfo[:]
198 self.extrainfo = extrainfo[:]
199 self.extradata = extradata[:]
199 self.extradata = extradata[:]
200 self.data = data
200 self.data = data
201 self.hasremovals = hasremovals
201 self.hasremovals = hasremovals
202
202
203 def findlines(self, data):
203 def findlines(self, data):
204 if not data:
204 if not data:
205 return []
205 return []
206 pos = data.find(b"\n")
206 pos = data.find(b"\n")
207 if pos == -1 or data[-1:] != b'\n':
207 if pos == -1 or data[-1:] != b'\n':
208 raise ValueError(b"Manifest did not end in a newline.")
208 raise ValueError(b"Manifest did not end in a newline.")
209 positions = [0]
209 positions = [0]
210 prev = data[: data.find(b'\x00')]
210 prev = data[: data.find(b'\x00')]
211 while pos < len(data) - 1 and pos != -1:
211 while pos < len(data) - 1 and pos != -1:
212 positions.append(pos + 1)
212 positions.append(pos + 1)
213 nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
213 nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
214 if nexts < prev:
214 if nexts < prev:
215 raise ValueError(b"Manifest lines not in sorted order.")
215 raise ValueError(b"Manifest lines not in sorted order.")
216 prev = nexts
216 prev = nexts
217 pos = data.find(b"\n", pos + 1)
217 pos = data.find(b"\n", pos + 1)
218 return positions
218 return positions
219
219
220 def _get(self, index):
220 def _get(self, index):
221 # get the position encoded in pos:
221 # get the position encoded in pos:
222 # positive number is an index in 'data'
222 # positive number is an index in 'data'
223 # negative number is in extrapieces
223 # negative number is in extrapieces
224 pos = self.positions[index]
224 pos = self.positions[index]
225 if pos >= 0:
225 if pos >= 0:
226 return self.data, pos
226 return self.data, pos
227 return self.extradata[-pos - 1], -1
227 return self.extradata[-pos - 1], -1
228
228
229 def _getkey(self, pos):
229 def _getkey(self, pos):
230 if pos >= 0:
230 if pos >= 0:
231 return self.data[pos : self.data.find(b'\x00', pos + 1)]
231 return self.data[pos : self.data.find(b'\x00', pos + 1)]
232 return self.extradata[-pos - 1][0]
232 return self.extradata[-pos - 1][0]
233
233
234 def bsearch(self, key):
234 def bsearch(self, key):
235 first = 0
235 first = 0
236 last = len(self.positions) - 1
236 last = len(self.positions) - 1
237
237
238 while first <= last:
238 while first <= last:
239 midpoint = (first + last) // 2
239 midpoint = (first + last) // 2
240 nextpos = self.positions[midpoint]
240 nextpos = self.positions[midpoint]
241 candidate = self._getkey(nextpos)
241 candidate = self._getkey(nextpos)
242 r = _cmp(key, candidate)
242 r = _cmp(key, candidate)
243 if r == 0:
243 if r == 0:
244 return midpoint
244 return midpoint
245 else:
245 else:
246 if r < 0:
246 if r < 0:
247 last = midpoint - 1
247 last = midpoint - 1
248 else:
248 else:
249 first = midpoint + 1
249 first = midpoint + 1
250 return -1
250 return -1
251
251
252 def bsearch2(self, key):
252 def bsearch2(self, key):
253 # same as the above, but will always return the position
253 # same as the above, but will always return the position
254 # done for performance reasons
254 # done for performance reasons
255 first = 0
255 first = 0
256 last = len(self.positions) - 1
256 last = len(self.positions) - 1
257
257
258 while first <= last:
258 while first <= last:
259 midpoint = (first + last) // 2
259 midpoint = (first + last) // 2
260 nextpos = self.positions[midpoint]
260 nextpos = self.positions[midpoint]
261 candidate = self._getkey(nextpos)
261 candidate = self._getkey(nextpos)
262 r = _cmp(key, candidate)
262 r = _cmp(key, candidate)
263 if r == 0:
263 if r == 0:
264 return (midpoint, True)
264 return (midpoint, True)
265 else:
265 else:
266 if r < 0:
266 if r < 0:
267 last = midpoint - 1
267 last = midpoint - 1
268 else:
268 else:
269 first = midpoint + 1
269 first = midpoint + 1
270 return (first, False)
270 return (first, False)
271
271
272 def __contains__(self, key):
272 def __contains__(self, key):
273 return self.bsearch(key) != -1
273 return self.bsearch(key) != -1
274
274
275 def __getitem__(self, key):
275 def __getitem__(self, key):
276 if not isinstance(key, bytes):
276 if not isinstance(key, bytes):
277 raise TypeError(b"getitem: manifest keys must be a bytes.")
277 raise TypeError(b"getitem: manifest keys must be a bytes.")
278 needle = self.bsearch(key)
278 needle = self.bsearch(key)
279 if needle == -1:
279 if needle == -1:
280 raise KeyError
280 raise KeyError
281 data, pos = self._get(needle)
281 data, pos = self._get(needle)
282 if pos == -1:
282 if pos == -1:
283 return (data[1], data[2])
283 return (data[1], data[2])
284 zeropos = data.find(b'\x00', pos)
284 zeropos = data.find(b'\x00', pos)
285 nlpos = data.find(b'\n', zeropos)
285 nlpos = data.find(b'\n', zeropos)
286 assert 0 <= needle <= len(self.positions)
286 assert 0 <= needle <= len(self.positions)
287 assert len(self.extrainfo) == len(self.positions)
287 assert len(self.extrainfo) == len(self.positions)
288 if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
288 if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
289 raise error.StorageError(b'Invalid manifest line')
289 raise error.StorageError(b'Invalid manifest line')
290 hlen = nlpos - zeropos - 1
290 hlen = nlpos - zeropos - 1
291 flags = data[nlpos - 1 : nlpos]
291 flags = data[nlpos - 1 : nlpos]
292 if flags in _manifestflags:
292 if flags in _manifestflags:
293 hlen -= 1
293 hlen -= 1
294 else:
294 else:
295 flags = b''
295 flags = b''
296 if hlen != 2 * self._nodelen:
296 if hlen != 2 * self._nodelen:
297 raise error.StorageError(b'Invalid manifest line')
297 raise error.StorageError(b'Invalid manifest line')
298 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
298 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
299 return (hashval, flags)
299 return (hashval, flags)
300
300
301 def __delitem__(self, key):
301 def __delitem__(self, key):
302 needle, found = self.bsearch2(key)
302 needle, found = self.bsearch2(key)
303 if not found:
303 if not found:
304 raise KeyError
304 raise KeyError
305 cur = self.positions[needle]
305 cur = self.positions[needle]
306 self.positions = self.positions[:needle] + self.positions[needle + 1 :]
306 self.positions = self.positions[:needle] + self.positions[needle + 1 :]
307 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
307 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
308 if cur >= 0:
308 if cur >= 0:
309 # This does NOT unsort the list as far as the search functions are
309 # This does NOT unsort the list as far as the search functions are
310 # concerned, as they only examine lines mapped by self.positions.
310 # concerned, as they only examine lines mapped by self.positions.
311 self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
311 self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
312 self.hasremovals = True
312 self.hasremovals = True
313
313
314 def __setitem__(self, key, value):
314 def __setitem__(self, key, value):
315 if not isinstance(key, bytes):
315 if not isinstance(key, bytes):
316 raise TypeError(b"setitem: manifest keys must be a byte string.")
316 raise TypeError(b"setitem: manifest keys must be a byte string.")
317 if not isinstance(value, tuple) or len(value) != 2:
317 if not isinstance(value, tuple) or len(value) != 2:
318 raise TypeError(
318 raise TypeError(
319 b"Manifest values must be a tuple of (node, flags)."
319 b"Manifest values must be a tuple of (node, flags)."
320 )
320 )
321 hashval = value[0]
321 hashval = value[0]
322 if not isinstance(hashval, bytes) or len(hashval) not in (20, 32):
322 if not isinstance(hashval, bytes) or len(hashval) not in (20, 32):
323 raise TypeError(b"node must be a 20-byte or 32-byte byte string")
323 raise TypeError(b"node must be a 20-byte or 32-byte byte string")
324 flags = value[1]
324 flags = value[1]
325 if not isinstance(flags, bytes) or len(flags) > 1:
325 if not isinstance(flags, bytes) or len(flags) > 1:
326 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
326 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
327 needle, found = self.bsearch2(key)
327 needle, found = self.bsearch2(key)
328 if found:
328 if found:
329 # put the item
329 # put the item
330 pos = self.positions[needle]
330 pos = self.positions[needle]
331 if pos < 0:
331 if pos < 0:
332 self.extradata[-pos - 1] = (key, hashval, value[1])
332 self.extradata[-pos - 1] = (key, hashval, value[1])
333 else:
333 else:
334 # just don't bother
334 # just don't bother
335 self.extradata.append((key, hashval, value[1]))
335 self.extradata.append((key, hashval, value[1]))
336 self.positions[needle] = -len(self.extradata)
336 self.positions[needle] = -len(self.extradata)
337 else:
337 else:
338 # not found, put it in with extra positions
338 # not found, put it in with extra positions
339 self.extradata.append((key, hashval, value[1]))
339 self.extradata.append((key, hashval, value[1]))
340 self.positions = (
340 self.positions = (
341 self.positions[:needle]
341 self.positions[:needle]
342 + [-len(self.extradata)]
342 + [-len(self.extradata)]
343 + self.positions[needle:]
343 + self.positions[needle:]
344 )
344 )
345 self.extrainfo = (
345 self.extrainfo = (
346 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
346 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
347 )
347 )
348
348
349 def copy(self):
349 def copy(self):
350 # XXX call _compact like in C?
350 # XXX call _compact like in C?
351 return _lazymanifest(
351 return _lazymanifest(
352 self._nodelen,
352 self._nodelen,
353 self.data,
353 self.data,
354 self.positions,
354 self.positions,
355 self.extrainfo,
355 self.extrainfo,
356 self.extradata,
356 self.extradata,
357 self.hasremovals,
357 self.hasremovals,
358 )
358 )
359
359
360 def _compact(self):
360 def _compact(self):
361 # hopefully not called TOO often
361 # hopefully not called TOO often
362 if len(self.extradata) == 0 and not self.hasremovals:
362 if len(self.extradata) == 0 and not self.hasremovals:
363 return
363 return
364 l = []
364 l = []
365 i = 0
365 i = 0
366 offset = 0
366 offset = 0
367 self.extrainfo = [0] * len(self.positions)
367 self.extrainfo = [0] * len(self.positions)
368 while i < len(self.positions):
368 while i < len(self.positions):
369 if self.positions[i] >= 0:
369 if self.positions[i] >= 0:
370 cur = self.positions[i]
370 cur = self.positions[i]
371 last_cut = cur
371 last_cut = cur
372
372
373 # Collect all contiguous entries in the buffer at the current
373 # Collect all contiguous entries in the buffer at the current
374 # offset, breaking out only for added/modified items held in
374 # offset, breaking out only for added/modified items held in
375 # extradata, or a deleted line prior to the next position.
375 # extradata, or a deleted line prior to the next position.
376 while True:
376 while True:
377 self.positions[i] = offset
377 self.positions[i] = offset
378 i += 1
378 i += 1
379 if i == len(self.positions) or self.positions[i] < 0:
379 if i == len(self.positions) or self.positions[i] < 0:
380 break
380 break
381
381
382 # A removed file has no positions[] entry, but does have an
382 # A removed file has no positions[] entry, but does have an
383 # overwritten first byte. Break out and find the end of the
383 # overwritten first byte. Break out and find the end of the
384 # current good entry/entries if there is a removed file
384 # current good entry/entries if there is a removed file
385 # before the next position.
385 # before the next position.
386 if (
386 if (
387 self.hasremovals
387 self.hasremovals
388 and self.data.find(b'\n\x00', cur, self.positions[i])
388 and self.data.find(b'\n\x00', cur, self.positions[i])
389 != -1
389 != -1
390 ):
390 ):
391 break
391 break
392
392
393 offset += self.positions[i] - cur
393 offset += self.positions[i] - cur
394 cur = self.positions[i]
394 cur = self.positions[i]
395 end_cut = self.data.find(b'\n', cur)
395 end_cut = self.data.find(b'\n', cur)
396 if end_cut != -1:
396 if end_cut != -1:
397 end_cut += 1
397 end_cut += 1
398 offset += end_cut - cur
398 offset += end_cut - cur
399 l.append(self.data[last_cut:end_cut])
399 l.append(self.data[last_cut:end_cut])
400 else:
400 else:
401 while i < len(self.positions) and self.positions[i] < 0:
401 while i < len(self.positions) and self.positions[i] < 0:
402 cur = self.positions[i]
402 cur = self.positions[i]
403 t = self.extradata[-cur - 1]
403 t = self.extradata[-cur - 1]
404 l.append(self._pack(t))
404 l.append(self._pack(t))
405 self.positions[i] = offset
405 self.positions[i] = offset
406 # Hashes are either 20 bytes (old sha1s) or 32
406 # Hashes are either 20 bytes (old sha1s) or 32
407 # bytes (new non-sha1).
407 # bytes (new non-sha1).
408 hlen = 20
408 hlen = 20
409 if len(t[1]) > 25:
409 if len(t[1]) > 25:
410 hlen = 32
410 hlen = 32
411 if len(t[1]) > hlen:
411 if len(t[1]) > hlen:
412 self.extrainfo[i] = ord(t[1][hlen + 1])
412 self.extrainfo[i] = ord(t[1][hlen + 1])
413 offset += len(l[-1])
413 offset += len(l[-1])
414 i += 1
414 i += 1
415 self.data = b''.join(l)
415 self.data = b''.join(l)
416 self.hasremovals = False
416 self.hasremovals = False
417 self.extradata = []
417 self.extradata = []
418
418
419 def _pack(self, d):
419 def _pack(self, d):
420 n = d[1]
420 n = d[1]
421 assert len(n) in (20, 32)
421 assert len(n) in (20, 32)
422 return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
422 return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
423
423
424 def text(self):
424 def text(self):
425 self._compact()
425 self._compact()
426 return self.data
426 return self.data
427
427
428 def diff(self, m2, clean=False):
428 def diff(self, m2, clean=False):
429 '''Finds changes between the current manifest and m2.'''
429 '''Finds changes between the current manifest and m2.'''
430 # XXX think whether efficiency matters here
430 # XXX think whether efficiency matters here
431 diff = {}
431 diff = {}
432
432
433 for fn, e1, flags in self.iterentries():
433 for fn, e1, flags in self.iterentries():
434 if fn not in m2:
434 if fn not in m2:
435 diff[fn] = (e1, flags), (None, b'')
435 diff[fn] = (e1, flags), (None, b'')
436 else:
436 else:
437 e2 = m2[fn]
437 e2 = m2[fn]
438 if (e1, flags) != e2:
438 if (e1, flags) != e2:
439 diff[fn] = (e1, flags), e2
439 diff[fn] = (e1, flags), e2
440 elif clean:
440 elif clean:
441 diff[fn] = None
441 diff[fn] = None
442
442
443 for fn, e2, flags in m2.iterentries():
443 for fn, e2, flags in m2.iterentries():
444 if fn not in self:
444 if fn not in self:
445 diff[fn] = (None, b''), (e2, flags)
445 diff[fn] = (None, b''), (e2, flags)
446
446
447 return diff
447 return diff
448
448
449 def iterentries(self):
449 def iterentries(self):
450 return lazymanifestiterentries(self)
450 return lazymanifestiterentries(self)
451
451
452 def iterkeys(self):
452 def iterkeys(self):
453 return lazymanifestiter(self)
453 return lazymanifestiter(self)
454
454
455 def __iter__(self):
455 def __iter__(self):
456 return lazymanifestiter(self)
456 return lazymanifestiter(self)
457
457
458 def __len__(self):
458 def __len__(self):
459 return len(self.positions)
459 return len(self.positions)
460
460
461 def filtercopy(self, filterfn):
461 def filtercopy(self, filterfn):
462 # XXX should be optimized
462 # XXX should be optimized
463 c = _lazymanifest(self._nodelen, b'')
463 c = _lazymanifest(self._nodelen, b'')
464 for f, n, fl in self.iterentries():
464 for f, n, fl in self.iterentries():
465 if filterfn(f):
465 if filterfn(f):
466 c[f] = n, fl
466 c[f] = n, fl
467 return c
467 return c
468
468
469
469
# Prefer the C implementation when the extension module provides it;
# fall back silently to the pure-Python class defined above.
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass


@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    """A flat manifest: a mapping of file path -> node, with per-file flags.

    Backed by a ``_lazymanifest`` (C or pure-Python) over the raw manifest
    text.  ``nodelen`` is the hash width in bytes (20 for sha1).
    """

    def __init__(self, nodelen, data=b''):
        self._nodelen = nodelen
        self._lm = _lazymanifest(nodelen, data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        """Return the (node, flags) pair for *key*; raises KeyError."""
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        # preserve the existing flags (if any) for this path
        self._lm[key] = node, self.flags(key)

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match is not None:
            match = matchmod.badmatch(match, lambda path, msg: None)
            sm2 = set(m2.walk(match))
            return {f for f in self.walk(match) if f not in sm2}
        return {f for f in self if f not in m2}

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        """Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files."""
        files = match.files()
        return len(files) < 100 and (
            match.isexact()
            or (match.prefix() and all(fn in self for fn in files))
        )

    def walk(self, match):
        """Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        """
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                if fn in self:
                    yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict(self._nodelen)
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict(self._nodelen)
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        """Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        """
        if match:
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        if flag not in _manifestflags:
            raise TypeError(b"Invalid manifest flag set.")
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key):
        try:
            return self._lm[key][1]
        except KeyError:
            return b''

    def copy(self):
        c = manifestdict(self._nodelen)
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        Returns ``(arraytext, deltatext)``: the new full text and the
        binary delta against *base*.
        """
        delta = []
        dstart = None
        dend = None
        dline = [b""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _(b"failed to remove %s from manifest") % f
                        )
                    l = b""
                if dstart is not None and dstart <= start and dend >= start:
                    # this edit is adjacent to / overlaps the pending run:
                    # extend it instead of starting a new delta hunk
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, b"".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, b"".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext)
            )

        return arraytext, deltatext


711 def _msearch(m, s, lo=0, hi=None):
711 def _msearch(m, s, lo=0, hi=None):
712 """return a tuple (start, end) that says where to find s within m.
712 """return a tuple (start, end) that says where to find s within m.
713
713
714 If the string is found m[start:end] are the line containing
714 If the string is found m[start:end] are the line containing
715 that string. If start == end the string was not found and
715 that string. If start == end the string was not found and
716 they indicate the proper sorted insertion point.
716 they indicate the proper sorted insertion point.
717
717
718 m should be a buffer, a memoryview or a byte string.
718 m should be a buffer, a memoryview or a byte string.
719 s is a byte string"""
719 s is a byte string"""
720
720
721 def advance(i, c):
721 def advance(i, c):
722 while i < lenm and m[i : i + 1] != c:
722 while i < lenm and m[i : i + 1] != c:
723 i += 1
723 i += 1
724 return i
724 return i
725
725
726 if not s:
726 if not s:
727 return (lo, lo)
727 return (lo, lo)
728 lenm = len(m)
728 lenm = len(m)
729 if not hi:
729 if not hi:
730 hi = lenm
730 hi = lenm
731 while lo < hi:
731 while lo < hi:
732 mid = (lo + hi) // 2
732 mid = (lo + hi) // 2
733 start = mid
733 start = mid
734 while start > 0 and m[start - 1 : start] != b'\n':
734 while start > 0 and m[start - 1 : start] != b'\n':
735 start -= 1
735 start -= 1
736 end = advance(start, b'\0')
736 end = advance(start, b'\0')
737 if bytes(m[start:end]) < s:
737 if bytes(m[start:end]) < s:
738 # we know that after the null there are 40 bytes of sha1
738 # we know that after the null there are 40 bytes of sha1
739 # this translates to the bisect lo = mid + 1
739 # this translates to the bisect lo = mid + 1
740 lo = advance(end + 40, b'\n') + 1
740 lo = advance(end + 40, b'\n') + 1
741 else:
741 else:
742 # this translates to the bisect hi = mid
742 # this translates to the bisect hi = mid
743 hi = start
743 hi = start
744 end = advance(lo, b'\0')
744 end = advance(lo, b'\0')
745 found = m[lo:end]
745 found = m[lo:end]
746 if s == found:
746 if s == found:
747 # we know that after the null there are 40 bytes of sha1
747 # we know that after the null there are 40 bytes of sha1
748 end = advance(end + 40, b'\n')
748 end = advance(end + 40, b'\n')
749 return (lo, end + 1)
749 return (lo, end + 1)
750 else:
750 else:
751 return (lo, lo)
751 return (lo, lo)
752
752
753
753
754 def _checkforbidden(l):
754 def _checkforbidden(l):
755 """Check filenames for illegal characters."""
755 """Check filenames for illegal characters."""
756 for f in l:
756 for f in l:
757 if b'\n' in f or b'\r' in f:
757 if b'\n' in f or b'\r' in f:
758 raise error.StorageError(
758 raise error.StorageError(
759 _(b"'\\n' and '\\r' disallowed in filenames: %r")
759 _(b"'\\n' and '\\r' disallowed in filenames: %r")
760 % pycompat.bytestr(f)
760 % pycompat.bytestr(f)
761 )
761 )
762
762
763
763
764 # apply the changes collected during the bisect loop to our addlist
764 # apply the changes collected during the bisect loop to our addlist
765 # return a delta suitable for addrevision
765 # return a delta suitable for addrevision
766 def _addlistdelta(addlist, x):
766 def _addlistdelta(addlist, x):
767 # for large addlist arrays, building a new array is cheaper
767 # for large addlist arrays, building a new array is cheaper
768 # than repeatedly modifying the existing one
768 # than repeatedly modifying the existing one
769 currentposition = 0
769 currentposition = 0
770 newaddlist = bytearray()
770 newaddlist = bytearray()
771
771
772 for start, end, content in x:
772 for start, end, content in x:
773 newaddlist += addlist[currentposition:start]
773 newaddlist += addlist[currentposition:start]
774 if content:
774 if content:
775 newaddlist += bytearray(content)
775 newaddlist += bytearray(content)
776
776
777 currentposition = end
777 currentposition = end
778
778
779 newaddlist += addlist[currentposition:]
779 newaddlist += addlist[currentposition:]
780
780
781 deltatext = b"".join(
781 deltatext = b"".join(
782 struct.pack(b">lll", start, end, len(content)) + content
782 struct.pack(b">lll", start, end, len(content)) + content
783 for start, end, content in x
783 for start, end, content in x
784 )
784 )
785 return deltatext, newaddlist
785 return deltatext, newaddlist
786
786
787
787
788 def _splittopdir(f):
788 def _splittopdir(f):
789 if b'/' in f:
789 if b'/' in f:
790 dir, subpath = f.split(b'/', 1)
790 dir, subpath = f.split(b'/', 1)
791 return dir + b'/', subpath
791 return dir + b'/', subpath
792 else:
792 else:
793 return b'', f
793 return b'', f
794
794
795
795
796 _noop = lambda s: None
796 _noop = lambda s: None
797
797
798
798
@interfaceutil.implementer(repository.imanifestdict)
class treemanifest(object):
    """A manifest stored as a tree of per-directory sub-manifests.

    ``_dirs`` maps subdirectory names (with trailing '/') to child
    treemanifests; ``_lazydirs`` holds not-yet-loaded children as
    ``(node, readsubtree, docopy)`` tuples; ``_files``/``_flags`` hold the
    entries directly in this directory.
    """

    def __init__(self, nodeconstants, dir=b'', text=b''):
        self._dir = dir
        self.nodeconstants = nodeconstants
        self._node = self.nodeconstants.nullid
        self._nodelen = self.nodeconstants.nodelen
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:

            def readsubtree(subdir, subm):
                raise AssertionError(
                    b'treemanifest constructor only accepts flat manifests'
                )

            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        # full path of *path* relative to the repo root
        return self._dir + path

    def _loadalllazy(self):
        """Force-load every lazily-referenced subdirectory."""
        selfdirs = self._dirs
        subpath = self._subpath
        for d, (node, readsubtree, docopy) in pycompat.iteritems(
            self._lazydirs
        ):
            if docopy:
                selfdirs[d] = readsubtree(subpath(d), node).copy()
            else:
                selfdirs[d] = readsubtree(subpath(d), node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        """Load the single lazy subdirectory *d* (no-op if already loaded
        or absent)."""
        v = self._lazydirs.get(d)
        if v:
            node, readsubtree, docopy = v
            if docopy:
                self._dirs[d] = readsubtree(self._subpath(d), node).copy()
            else:
                self._dirs[d] = readsubtree(self._subpath(d), node)
            del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        """Load the lazy children named in *visit* (or all of them for
        b'all'/b'this'); returns *visit* unless everything was loaded."""
        if not visit:
            return None
        if visit == b'all' or visit == b'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            loadlazy(k + b'/')
        return visit

    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The criteria currently is:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in pycompat.iteritems(t1._lazydirs):
            v2 = t2._lazydirs.get(d)
            if not v2 or v2[0] != v1[0]:
                toloadlazy.append(d)
        for d, v1 in pycompat.iteritems(t2._lazydirs):
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            t1._loadlazy(d)
            t2._loadlazy(d)

883 def __len__(self):
883 def __len__(self):
884 self._load()
884 self._load()
885 size = len(self._files)
885 size = len(self._files)
886 self._loadalllazy()
886 self._loadalllazy()
887 for m in self._dirs.values():
887 for m in self._dirs.values():
888 size += m.__len__()
888 size += m.__len__()
889 return size
889 return size
890
890
891 def __nonzero__(self):
891 def __nonzero__(self):
892 # Faster than "__len() != 0" since it avoids loading sub-manifests
892 # Faster than "__len() != 0" since it avoids loading sub-manifests
893 return not self._isempty()
893 return not self._isempty()
894
894
895 __bool__ = __nonzero__
895 __bool__ = __nonzero__
896
896
897 def _isempty(self):
897 def _isempty(self):
898 self._load() # for consistency; already loaded by all callers
898 self._load() # for consistency; already loaded by all callers
899 # See if we can skip loading everything.
899 # See if we can skip loading everything.
900 if self._files or (
900 if self._files or (
901 self._dirs and any(not m._isempty() for m in self._dirs.values())
901 self._dirs and any(not m._isempty() for m in self._dirs.values())
902 ):
902 ):
903 return False
903 return False
904 self._loadalllazy()
904 self._loadalllazy()
905 return not self._dirs or all(m._isempty() for m in self._dirs.values())
905 return not self._dirs or all(m._isempty() for m in self._dirs.values())
906
906
907 @encoding.strmethod
907 @encoding.strmethod
908 def __repr__(self):
908 def __repr__(self):
909 return (
909 return (
910 b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
910 b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
911 % (
911 % (
912 self._dir,
912 self._dir,
913 hex(self._node),
913 hex(self._node),
914 bool(self._loadfunc is _noop),
914 bool(self._loadfunc is _noop),
915 self._dirty,
915 self._dirty,
916 id(self),
916 id(self),
917 )
917 )
918 )
918 )
919
919
920 def dir(self):
920 def dir(self):
921 """The directory that this tree manifest represents, including a
921 """The directory that this tree manifest represents, including a
922 trailing '/'. Empty string for the repo root directory."""
922 trailing '/'. Empty string for the repo root directory."""
923 return self._dir
923 return self._dir
924
924
925 def node(self):
925 def node(self):
926 """This node of this instance. nullid for unsaved instances. Should
926 """This node of this instance. nullid for unsaved instances. Should
927 be updated when the instance is read or written from a revlog.
927 be updated when the instance is read or written from a revlog.
928 """
928 """
929 assert not self._dirty
929 assert not self._dirty
930 return self._node
930 return self._node
931
931
932 def setnode(self, node):
932 def setnode(self, node):
933 self._node = node
933 self._node = node
934 self._dirty = False
934 self._dirty = False
935
935
936 def iterentries(self):
936 def iterentries(self):
937 self._load()
937 self._load()
938 self._loadalllazy()
938 self._loadalllazy()
939 for p, n in sorted(
939 for p, n in sorted(
940 itertools.chain(self._dirs.items(), self._files.items())
940 itertools.chain(self._dirs.items(), self._files.items())
941 ):
941 ):
942 if p in self._files:
942 if p in self._files:
943 yield self._subpath(p), n, self._flags.get(p, b'')
943 yield self._subpath(p), n, self._flags.get(p, b'')
944 else:
944 else:
945 for x in n.iterentries():
945 for x in n.iterentries():
946 yield x
946 yield x
947
947
948 def items(self):
948 def items(self):
949 self._load()
949 self._load()
950 self._loadalllazy()
950 self._loadalllazy()
951 for p, n in sorted(
951 for p, n in sorted(
952 itertools.chain(self._dirs.items(), self._files.items())
952 itertools.chain(self._dirs.items(), self._files.items())
953 ):
953 ):
954 if p in self._files:
954 if p in self._files:
955 yield self._subpath(p), n
955 yield self._subpath(p), n
956 else:
956 else:
957 for f, sn in pycompat.iteritems(n):
957 for f, sn in pycompat.iteritems(n):
958 yield f, sn
958 yield f, sn
959
959
960 iteritems = items
960 iteritems = items
961
961
962 def iterkeys(self):
962 def iterkeys(self):
963 self._load()
963 self._load()
964 self._loadalllazy()
964 self._loadalllazy()
965 for p in sorted(itertools.chain(self._dirs, self._files)):
965 for p in sorted(itertools.chain(self._dirs, self._files)):
966 if p in self._files:
966 if p in self._files:
967 yield self._subpath(p)
967 yield self._subpath(p)
968 else:
968 else:
969 for f in self._dirs[p]:
969 for f in self._dirs[p]:
970 yield f
970 yield f
971
971
972 def keys(self):
972 def keys(self):
973 return list(self.iterkeys())
973 return list(self.iterkeys())
974
974
975 def __iter__(self):
975 def __iter__(self):
976 return self.iterkeys()
976 return self.iterkeys()
977
977
978 def __contains__(self, f):
978 def __contains__(self, f):
979 if f is None:
979 if f is None:
980 return False
980 return False
981 self._load()
981 self._load()
982 dir, subpath = _splittopdir(f)
982 dir, subpath = _splittopdir(f)
983 if dir:
983 if dir:
984 self._loadlazy(dir)
984 self._loadlazy(dir)
985
985
986 if dir not in self._dirs:
986 if dir not in self._dirs:
987 return False
987 return False
988
988
989 return self._dirs[dir].__contains__(subpath)
989 return self._dirs[dir].__contains__(subpath)
990 else:
990 else:
991 return f in self._files
991 return f in self._files
992
992
993 def get(self, f, default=None):
993 def get(self, f, default=None):
994 self._load()
994 self._load()
995 dir, subpath = _splittopdir(f)
995 dir, subpath = _splittopdir(f)
996 if dir:
996 if dir:
997 self._loadlazy(dir)
997 self._loadlazy(dir)
998
998
999 if dir not in self._dirs:
999 if dir not in self._dirs:
1000 return default
1000 return default
1001 return self._dirs[dir].get(subpath, default)
1001 return self._dirs[dir].get(subpath, default)
1002 else:
1002 else:
1003 return self._files.get(f, default)
1003 return self._files.get(f, default)
1004
1004
1005 def __getitem__(self, f):
1005 def __getitem__(self, f):
1006 self._load()
1006 self._load()
1007 dir, subpath = _splittopdir(f)
1007 dir, subpath = _splittopdir(f)
1008 if dir:
1008 if dir:
1009 self._loadlazy(dir)
1009 self._loadlazy(dir)
1010
1010
1011 return self._dirs[dir].__getitem__(subpath)
1011 return self._dirs[dir].__getitem__(subpath)
1012 else:
1012 else:
1013 return self._files[f]
1013 return self._files[f]
1014
1014
1015 def flags(self, f):
1015 def flags(self, f):
1016 self._load()
1016 self._load()
1017 dir, subpath = _splittopdir(f)
1017 dir, subpath = _splittopdir(f)
1018 if dir:
1018 if dir:
1019 self._loadlazy(dir)
1019 self._loadlazy(dir)
1020
1020
1021 if dir not in self._dirs:
1021 if dir not in self._dirs:
1022 return b''
1022 return b''
1023 return self._dirs[dir].flags(subpath)
1023 return self._dirs[dir].flags(subpath)
1024 else:
1024 else:
1025 if f in self._lazydirs or f in self._dirs:
1025 if f in self._lazydirs or f in self._dirs:
1026 return b''
1026 return b''
1027 return self._flags.get(f, b'')
1027 return self._flags.get(f, b'')
1028
1028
1029 def find(self, f):
1029 def find(self, f):
1030 self._load()
1030 self._load()
1031 dir, subpath = _splittopdir(f)
1031 dir, subpath = _splittopdir(f)
1032 if dir:
1032 if dir:
1033 self._loadlazy(dir)
1033 self._loadlazy(dir)
1034
1034
1035 return self._dirs[dir].find(subpath)
1035 return self._dirs[dir].find(subpath)
1036 else:
1036 else:
1037 return self._files[f], self._flags.get(f, b'')
1037 return self._files[f], self._flags.get(f, b'')
1038
1038
1039 def __delitem__(self, f):
1039 def __delitem__(self, f):
1040 self._load()
1040 self._load()
1041 dir, subpath = _splittopdir(f)
1041 dir, subpath = _splittopdir(f)
1042 if dir:
1042 if dir:
1043 self._loadlazy(dir)
1043 self._loadlazy(dir)
1044
1044
1045 self._dirs[dir].__delitem__(subpath)
1045 self._dirs[dir].__delitem__(subpath)
1046 # If the directory is now empty, remove it
1046 # If the directory is now empty, remove it
1047 if self._dirs[dir]._isempty():
1047 if self._dirs[dir]._isempty():
1048 del self._dirs[dir]
1048 del self._dirs[dir]
1049 else:
1049 else:
1050 del self._files[f]
1050 del self._files[f]
1051 if f in self._flags:
1051 if f in self._flags:
1052 del self._flags[f]
1052 del self._flags[f]
1053 self._dirty = True
1053 self._dirty = True
1054
1054
    def __setitem__(self, f, n):
        """Set the node id for path ``f`` to ``n``.

        Intermediate subtree manifests are created on demand.  Marks this
        tree dirty.
        """
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(
                    self.nodeconstants, self._subpath(dir)
                )
            self._dirs[dir].__setitem__(subpath, n)
        else:
            # manifest nodes are either 20 bytes or 32 bytes,
            # depending on the hash in use. Assert this as historically
            # sometimes extra bytes were added.
            assert len(n) in (20, 32)
            self._files[f] = n
        self._dirty = True
1073
1073
1074 def _load(self):
1074 def _load(self):
1075 if self._loadfunc is not _noop:
1075 if self._loadfunc is not _noop:
1076 lf, self._loadfunc = self._loadfunc, _noop
1076 lf, self._loadfunc = self._loadfunc, _noop
1077 lf(self)
1077 lf(self)
1078 elif self._copyfunc is not _noop:
1078 elif self._copyfunc is not _noop:
1079 cf, self._copyfunc = self._copyfunc, _noop
1079 cf, self._copyfunc = self._copyfunc, _noop
1080 cf(self)
1080 cf(self)
1081
1081
    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f.

        Raises TypeError when ``flags`` is not one of the recognized
        manifest flag values (``_manifestflags``).  Creates intermediate
        subtrees on demand and marks this tree dirty.
        """
        if flags not in _manifestflags:
            raise TypeError(b"Invalid manifest flag set.")
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(
                    self.nodeconstants, self._subpath(dir)
                )
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True
1098
1098
    def copy(self):
        """Return a copy of this tree manifest.

        The copy is lazy where possible: if this tree is itself still
        pending a load, the duplication work is deferred into the copy's
        ``_copyfunc`` and only performed when the copy is first accessed.
        """
        copy = treemanifest(self.nodeconstants, self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:

            def _copyfunc(s):
                self._load()
                # third tuple field True marks the lazy subtree as needing
                # its own copy when it is materialized (contrast with
                # parse(), which stores False for directly-cached subtrees)
                s._lazydirs = {
                    d: (n, r, True)
                    for d, (n, r, c) in pycompat.iteritems(self._lazydirs)
                }
                sdirs = s._dirs
                for d, v in pycompat.iteritems(self._dirs):
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)

            if self._loadfunc is _noop:
                # already loaded: duplicate eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer the duplication
                copy._copyfunc = _copyfunc
        else:
            # this tree is itself an unmaterialized copy; share its copier
            copy._copyfunc = self._copyfunc
        return copy
1123 return copy
1124
1124
    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            # narrow both sides first, then compare without the matcher
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.filesnotin(m2)

        files = set()

        def _filesnotin(t1, t2):
            # identical, clean subtrees cannot differ: skip loading them
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in pycompat.iteritems(t1._dirs):
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree is missing from t2
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files
1152 return files
1153
1153
    @propertycache
    def _alldirs(self):
        # lazily computed (and cached) directory map for this tree
        return pathutil.dirs(self)
1157
1157
    def dirs(self):
        """Return the cached ``pathutil.dirs`` structure for this tree."""
        return self._alldirs
1160
1160
1161 def hasdir(self, dir):
1161 def hasdir(self, dir):
1162 self._load()
1162 self._load()
1163 topdir, subdir = _splittopdir(dir)
1163 topdir, subdir = _splittopdir(dir)
1164 if topdir:
1164 if topdir:
1165 self._loadlazy(topdir)
1165 self._loadlazy(topdir)
1166 if topdir in self._dirs:
1166 if topdir in self._dirs:
1167 return self._dirs[topdir].hasdir(subdir)
1167 return self._dirs[topdir].hasdir(subdir)
1168 return False
1168 return False
1169 dirslash = dir + b'/'
1169 dirslash = dir + b'/'
1170 return dirslash in self._dirs or dirslash in self._lazydirs
1170 return dirslash in self._dirs or dirslash in self._lazydirs
1171
1171
    def walk(self, match):
        """Generates matching file names.

        It also reports nonexistent files by marking them bad with match.bad().
        """
        if match.always():
            # fast path: everything matches
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        # anything left in fset was named explicitly but never seen;
        # report it unless it is a directory that does exist
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
1197
1197
    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1])
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                # p is a subdirectory key (trailing slash stripped for
                # the visit-set membership test)
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f
1216
1216
1217 def _matches(self, match):
1217 def _matches(self, match):
1218 """recursively generate a new manifest filtered by the match argument."""
1218 """recursively generate a new manifest filtered by the match argument."""
1219 if match.always():
1219 if match.always():
1220 return self.copy()
1220 return self.copy()
1221 return self._matches_inner(match)
1221 return self._matches_inner(match)
1222
1222
    def _matches_inner(self, match):
        """Recursive worker for _matches(): build a filtered copy of this
        subtree, pruning via match.visitchildrenset() where possible."""
        if match.always():
            return self.copy()

        visit = match.visitchildrenset(self._dir[:-1])
        if visit == b'all':
            return self.copy()
        ret = treemanifest(self.nodeconstants, self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != b'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in pycompat.iteritems(self._dirs):
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches_inner(match)
            # only keep subtrees that actually retained something
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1263
1263
    def fastdelta(self, base, changes):
        # tree manifests do not support the flat-text fastdelta path;
        # callers catch this and fall back to a full-text delta
        raise FastdeltaUnavailable()
1266
1266
    def diff(self, m2, match=None, clean=False):
        """Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        """
        if match and not match.always():
            # narrow both sides, then diff without the matcher
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest(self.nodeconstants)

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and append new tree-manifests which
            needs to be compared to stack"""
            # identical, clean subtrees cannot differ: prune the walk
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            for d, m1 in pycompat.iteritems(t1._dirs):
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in pycompat.iteritems(t2._dirs):
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            for fn, n1 in pycompat.iteritems(t1._files):
                fl1 = t1._flags.get(fn, b'')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, b'')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in pycompat.iteritems(t2._files):
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, b'')
                    result[t2._subpath(fn)] = ((None, b''), (n2, fl2))

        # explicit stack instead of recursion to cope with deep trees
        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result
1327
1327
1328 def unmodifiedsince(self, m2):
1328 def unmodifiedsince(self, m2):
1329 return not self._dirty and not m2._dirty and self._node == m2._node
1329 return not self._dirty and not m2._dirty and self._node == m2._node
1330
1330
    def parse(self, text, readsubtree):
        """Populate this tree from serialized manifest ``text``.

        Subtree entries (flag ``t``) are recorded lazily; plain entries
        with a ``/`` in the name indicate a flat manifest being parsed
        into a tree.
        """
        selflazy = self._lazydirs
        for f, n, fl in _parse(self._nodelen, text):
            if fl == b't':
                f = f + b'/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (n, readsubtree, False)
            elif b'/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1353
1353
    def text(self):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries())
1358
1358
    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        # subdirectory entries carry the b't' flag; trailing slash stripped
        lazydirs = [
            (d[:-1], v[0], b't') for d, v in pycompat.iteritems(self._lazydirs)
        ]
        dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))
1371
1371
    def read(self, gettext, readsubtree):
        """Arm lazy loading: ``gettext()`` is fetched and parsed only when
        this tree is first accessed (via _load)."""

        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            # freshly parsed content matches storage, so it is clean
            s._dirty = False

        self._loadfunc = _load_for_read
1378
1378
    def writesubtrees(self, m1, m2, writesubtree, match):
        """Invoke ``writesubtree(subm, p1node, p2node, match)`` for each
        subtree of this manifest, using m1/m2 as the parent manifests to
        derive parent node ids.  Subtrees pruned by ``match`` are skipped."""
        self._load()  # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest(self.nodeconstants)

        def getnode(m, d):
            # prefer the lazily-recorded node; fall back to the loaded
            # subtree (or nullid via the shared empty tree)
            ld = m._lazydirs.get(d)
            if ld:
                return ld[0]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == b'this' or visit == b'all':
            visit = None
        for d, subm in pycompat.iteritems(self._dirs):
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == self.nodeconstants.nullid:
                # keep a non-null parent in first position when possible
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)
1404
1404
    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1]):
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in pycompat.iteritems(self._dirs):
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree
1422
1422
1423
1423
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    # name of the backing file, opened via self._opener
    _file = b'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        self._dirty = False
        self._read = False
        self._opener = None

    def read(self):
        """Load the on-disk cache file into memory (at most once)."""
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    # TODO do we need to do work here for sha1 portability?
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack(b'>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        """Persist the cache to disk if it has been modified."""
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        try:
            with self._opener(
                self._file, b'w', atomictemp=True, checkambig=True
            ) as fp:
                node = self._head.prev
                while True:
                    if node.key in self._cache:
                        fp.write(node.key)
                        fp.write(struct.pack(b'>L', len(node.value)))
                        fp.write(node.value)
                    if node is self._head:
                        break
                    node = node.prev
        except IOError:
            # We could not write the cache (eg: permission error)
            # the content can be missing.
            #
            # We could try harder and see if we could recreate a wcache
            # directory where we could write to.
            #
            # XXX the error passes silently; having some way to issue an
            # error log `ui.log` would be nice.
            pass

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        """Drop the in-memory cache; optionally truncate the on-disk file."""
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False
1547
1547
1548
1548
# an upper bound of what we expect from compression
# (real-life value seems to be "3")
MAXCOMPRESSION = 3
1552
1552
1553
1553
class FastdeltaUnavailable(Exception):
    """Exception raised when fastdelta isn't usable on a manifest."""
1556
1556
1557
1557
1558 @interfaceutil.implementer(repository.imanifeststorage)
1558 @interfaceutil.implementer(repository.imanifeststorage)
1559 class manifestrevlog(object):
1559 class manifestrevlog(object):
1560 """A revlog that stores manifest texts. This is responsible for caching the
1560 """A revlog that stores manifest texts. This is responsible for caching the
1561 full-text manifest contents.
1561 full-text manifest contents.
1562 """
1562 """
1563
1563
1564 def __init__(
1564 def __init__(
1565 self,
1565 self,
1566 nodeconstants,
1566 nodeconstants,
1567 opener,
1567 opener,
1568 tree=b'',
1568 tree=b'',
1569 dirlogcache=None,
1569 dirlogcache=None,
1570 indexfile=None,
1571 treemanifest=False,
1570 treemanifest=False,
1572 ):
1571 ):
1573 """Constructs a new manifest revlog
1572 """Constructs a new manifest revlog
1574
1573
1575 `indexfile` - used by extensions to have two manifests at once, like
1574 `indexfile` - used by extensions to have two manifests at once, like
1576 when transitioning between flatmanifeset and treemanifests.
1575 when transitioning between flatmanifeset and treemanifests.
1577
1576
1578 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1577 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1579 options can also be used to make this a tree manifest revlog. The opener
1578 options can also be used to make this a tree manifest revlog. The opener
1580 option takes precedence, so if it is set to True, we ignore whatever
1579 option takes precedence, so if it is set to True, we ignore whatever
1581 value is passed in to the constructor.
1580 value is passed in to the constructor.
1582 """
1581 """
1583 self.nodeconstants = nodeconstants
1582 self.nodeconstants = nodeconstants
1584 # During normal operations, we expect to deal with not more than four
1583 # During normal operations, we expect to deal with not more than four
1585 # revs at a time (such as during commit --amend). When rebasing large
1584 # revs at a time (such as during commit --amend). When rebasing large
1586 # stacks of commits, the number can go up, hence the config knob below.
1585 # stacks of commits, the number can go up, hence the config knob below.
1587 cachesize = 4
1586 cachesize = 4
1588 optiontreemanifest = False
1587 optiontreemanifest = False
1589 opts = getattr(opener, 'options', None)
1588 opts = getattr(opener, 'options', None)
1590 if opts is not None:
1589 if opts is not None:
1591 cachesize = opts.get(b'manifestcachesize', cachesize)
1590 cachesize = opts.get(b'manifestcachesize', cachesize)
1592 optiontreemanifest = opts.get(b'treemanifest', False)
1591 optiontreemanifest = opts.get(b'treemanifest', False)
1593
1592
1594 self._treeondisk = optiontreemanifest or treemanifest
1593 self._treeondisk = optiontreemanifest or treemanifest
1595
1594
1596 self._fulltextcache = manifestfulltextcache(cachesize)
1595 self._fulltextcache = manifestfulltextcache(cachesize)
1597
1596
1598 if tree:
1597 if tree:
1599 assert self._treeondisk, b'opts is %r' % opts
1598 assert self._treeondisk, b'opts is %r' % opts
1600
1599
1601 if indexfile is None:
1600 radix = b'00manifest'
1602 indexfile = b'00manifest.i'
1603 if tree:
1601 if tree:
1604 indexfile = b"meta/" + tree + indexfile
1602 radix = b"meta/" + tree + radix
1605
1603
1606 self.tree = tree
1604 self.tree = tree
1607
1605
1608 # The dirlogcache is kept on the root manifest log
1606 # The dirlogcache is kept on the root manifest log
1609 if tree:
1607 if tree:
1610 self._dirlogcache = dirlogcache
1608 self._dirlogcache = dirlogcache
1611 else:
1609 else:
1612 self._dirlogcache = {b'': self}
1610 self._dirlogcache = {b'': self}
1613
1611
1614 self._revlog = revlog.revlog(
1612 self._revlog = revlog.revlog(
1615 opener,
1613 opener,
1616 target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
1614 target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
1617 indexfile=indexfile,
1615 radix=radix,
1618 # only root indexfile is cached
1616 # only root indexfile is cached
1619 checkambig=not bool(tree),
1617 checkambig=not bool(tree),
1620 mmaplargeindex=True,
1618 mmaplargeindex=True,
1621 upperboundcomp=MAXCOMPRESSION,
1619 upperboundcomp=MAXCOMPRESSION,
1622 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
1620 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
1623 )
1621 )
1624
1622
1625 self.index = self._revlog.index
1623 self.index = self._revlog.index
1626 self._generaldelta = self._revlog._generaldelta
1624 self._generaldelta = self._revlog._generaldelta
1627
1625
1628 def _setupmanifestcachehooks(self, repo):
1626 def _setupmanifestcachehooks(self, repo):
1629 """Persist the manifestfulltextcache on lock release"""
1627 """Persist the manifestfulltextcache on lock release"""
1630 if not util.safehasattr(repo, b'_wlockref'):
1628 if not util.safehasattr(repo, b'_wlockref'):
1631 return
1629 return
1632
1630
1633 self._fulltextcache._opener = repo.wcachevfs
1631 self._fulltextcache._opener = repo.wcachevfs
1634 if repo._currentlock(repo._wlockref) is None:
1632 if repo._currentlock(repo._wlockref) is None:
1635 return
1633 return
1636
1634
1637 reporef = weakref.ref(repo)
1635 reporef = weakref.ref(repo)
1638 manifestrevlogref = weakref.ref(self)
1636 manifestrevlogref = weakref.ref(self)
1639
1637
1640 def persistmanifestcache(success):
1638 def persistmanifestcache(success):
1641 # Repo is in an unknown state, do not persist.
1639 # Repo is in an unknown state, do not persist.
1642 if not success:
1640 if not success:
1643 return
1641 return
1644
1642
1645 repo = reporef()
1643 repo = reporef()
1646 self = manifestrevlogref()
1644 self = manifestrevlogref()
1647 if repo is None or self is None:
1645 if repo is None or self is None:
1648 return
1646 return
1649 if repo.manifestlog.getstorage(b'') is not self:
1647 if repo.manifestlog.getstorage(b'') is not self:
1650 # there's a different manifest in play now, abort
1648 # there's a different manifest in play now, abort
1651 return
1649 return
1652 self._fulltextcache.write()
1650 self._fulltextcache.write()
1653
1651
1654 repo._afterlock(persistmanifestcache)
1652 repo._afterlock(persistmanifestcache)
1655
1653
1656 @property
1654 @property
1657 def fulltextcache(self):
1655 def fulltextcache(self):
1658 return self._fulltextcache
1656 return self._fulltextcache
1659
1657
1660 def clearcaches(self, clear_persisted_data=False):
1658 def clearcaches(self, clear_persisted_data=False):
1661 self._revlog.clearcaches()
1659 self._revlog.clearcaches()
1662 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1660 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1663 self._dirlogcache = {self.tree: self}
1661 self._dirlogcache = {self.tree: self}
1664
1662
1665 def dirlog(self, d):
1663 def dirlog(self, d):
1666 if d:
1664 if d:
1667 assert self._treeondisk
1665 assert self._treeondisk
1668 if d not in self._dirlogcache:
1666 if d not in self._dirlogcache:
1669 mfrevlog = manifestrevlog(
1667 mfrevlog = manifestrevlog(
1670 self.nodeconstants,
1668 self.nodeconstants,
1671 self.opener,
1669 self.opener,
1672 d,
1670 d,
1673 self._dirlogcache,
1671 self._dirlogcache,
1674 treemanifest=self._treeondisk,
1672 treemanifest=self._treeondisk,
1675 )
1673 )
1676 self._dirlogcache[d] = mfrevlog
1674 self._dirlogcache[d] = mfrevlog
1677 return self._dirlogcache[d]
1675 return self._dirlogcache[d]
1678
1676
1679 def add(
1677 def add(
1680 self,
1678 self,
1681 m,
1679 m,
1682 transaction,
1680 transaction,
1683 link,
1681 link,
1684 p1,
1682 p1,
1685 p2,
1683 p2,
1686 added,
1684 added,
1687 removed,
1685 removed,
1688 readtree=None,
1686 readtree=None,
1689 match=None,
1687 match=None,
1690 ):
1688 ):
1691 """add some manifest entry in to the manifest log
1689 """add some manifest entry in to the manifest log
1692
1690
1693 input:
1691 input:
1694
1692
1695 m: the manifest dict we want to store
1693 m: the manifest dict we want to store
1696 transaction: the open transaction
1694 transaction: the open transaction
1697 p1: manifest-node of p1
1695 p1: manifest-node of p1
1698 p2: manifest-node of p2
1696 p2: manifest-node of p2
1699 added: file added/changed compared to parent
1697 added: file added/changed compared to parent
1700 removed: file removed compared to parent
1698 removed: file removed compared to parent
1701
1699
1702 tree manifest input:
1700 tree manifest input:
1703
1701
1704 readtree: a function to read a subtree
1702 readtree: a function to read a subtree
1705 match: a filematcher for the subpart of the tree manifest
1703 match: a filematcher for the subpart of the tree manifest
1706 """
1704 """
1707 try:
1705 try:
1708 if p1 not in self.fulltextcache:
1706 if p1 not in self.fulltextcache:
1709 raise FastdeltaUnavailable()
1707 raise FastdeltaUnavailable()
1710 # If our first parent is in the manifest cache, we can
1708 # If our first parent is in the manifest cache, we can
1711 # compute a delta here using properties we know about the
1709 # compute a delta here using properties we know about the
1712 # manifest up-front, which may save time later for the
1710 # manifest up-front, which may save time later for the
1713 # revlog layer.
1711 # revlog layer.
1714
1712
1715 _checkforbidden(added)
1713 _checkforbidden(added)
1716 # combine the changed lists into one sorted iterator
1714 # combine the changed lists into one sorted iterator
1717 work = heapq.merge(
1715 work = heapq.merge(
1718 [(x, False) for x in sorted(added)],
1716 [(x, False) for x in sorted(added)],
1719 [(x, True) for x in sorted(removed)],
1717 [(x, True) for x in sorted(removed)],
1720 )
1718 )
1721
1719
1722 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1720 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1723 cachedelta = self._revlog.rev(p1), deltatext
1721 cachedelta = self._revlog.rev(p1), deltatext
1724 text = util.buffer(arraytext)
1722 text = util.buffer(arraytext)
1725 rev = self._revlog.addrevision(
1723 rev = self._revlog.addrevision(
1726 text, transaction, link, p1, p2, cachedelta
1724 text, transaction, link, p1, p2, cachedelta
1727 )
1725 )
1728 n = self._revlog.node(rev)
1726 n = self._revlog.node(rev)
1729 except FastdeltaUnavailable:
1727 except FastdeltaUnavailable:
1730 # The first parent manifest isn't already loaded or the
1728 # The first parent manifest isn't already loaded or the
1731 # manifest implementation doesn't support fastdelta, so
1729 # manifest implementation doesn't support fastdelta, so
1732 # we'll just encode a fulltext of the manifest and pass
1730 # we'll just encode a fulltext of the manifest and pass
1733 # that through to the revlog layer, and let it handle the
1731 # that through to the revlog layer, and let it handle the
1734 # delta process.
1732 # delta process.
1735 if self._treeondisk:
1733 if self._treeondisk:
1736 assert readtree, b"readtree must be set for treemanifest writes"
1734 assert readtree, b"readtree must be set for treemanifest writes"
1737 assert match, b"match must be specified for treemanifest writes"
1735 assert match, b"match must be specified for treemanifest writes"
1738 m1 = readtree(self.tree, p1)
1736 m1 = readtree(self.tree, p1)
1739 m2 = readtree(self.tree, p2)
1737 m2 = readtree(self.tree, p2)
1740 n = self._addtree(
1738 n = self._addtree(
1741 m, transaction, link, m1, m2, readtree, match=match
1739 m, transaction, link, m1, m2, readtree, match=match
1742 )
1740 )
1743 arraytext = None
1741 arraytext = None
1744 else:
1742 else:
1745 text = m.text()
1743 text = m.text()
1746 rev = self._revlog.addrevision(text, transaction, link, p1, p2)
1744 rev = self._revlog.addrevision(text, transaction, link, p1, p2)
1747 n = self._revlog.node(rev)
1745 n = self._revlog.node(rev)
1748 arraytext = bytearray(text)
1746 arraytext = bytearray(text)
1749
1747
1750 if arraytext is not None:
1748 if arraytext is not None:
1751 self.fulltextcache[n] = arraytext
1749 self.fulltextcache[n] = arraytext
1752
1750
1753 return n
1751 return n
1754
1752
1755 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1753 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1756 # If the manifest is unchanged compared to one parent,
1754 # If the manifest is unchanged compared to one parent,
1757 # don't write a new revision
1755 # don't write a new revision
1758 if self.tree != b'' and (
1756 if self.tree != b'' and (
1759 m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
1757 m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
1760 ):
1758 ):
1761 return m.node()
1759 return m.node()
1762
1760
1763 def writesubtree(subm, subp1, subp2, match):
1761 def writesubtree(subm, subp1, subp2, match):
1764 sublog = self.dirlog(subm.dir())
1762 sublog = self.dirlog(subm.dir())
1765 sublog.add(
1763 sublog.add(
1766 subm,
1764 subm,
1767 transaction,
1765 transaction,
1768 link,
1766 link,
1769 subp1,
1767 subp1,
1770 subp2,
1768 subp2,
1771 None,
1769 None,
1772 None,
1770 None,
1773 readtree=readtree,
1771 readtree=readtree,
1774 match=match,
1772 match=match,
1775 )
1773 )
1776
1774
1777 m.writesubtrees(m1, m2, writesubtree, match)
1775 m.writesubtrees(m1, m2, writesubtree, match)
1778 text = m.dirtext()
1776 text = m.dirtext()
1779 n = None
1777 n = None
1780 if self.tree != b'':
1778 if self.tree != b'':
1781 # Double-check whether contents are unchanged to one parent
1779 # Double-check whether contents are unchanged to one parent
1782 if text == m1.dirtext():
1780 if text == m1.dirtext():
1783 n = m1.node()
1781 n = m1.node()
1784 elif text == m2.dirtext():
1782 elif text == m2.dirtext():
1785 n = m2.node()
1783 n = m2.node()
1786
1784
1787 if not n:
1785 if not n:
1788 rev = self._revlog.addrevision(
1786 rev = self._revlog.addrevision(
1789 text, transaction, link, m1.node(), m2.node()
1787 text, transaction, link, m1.node(), m2.node()
1790 )
1788 )
1791 n = self._revlog.node(rev)
1789 n = self._revlog.node(rev)
1792
1790
1793 # Save nodeid so parent manifest can calculate its nodeid
1791 # Save nodeid so parent manifest can calculate its nodeid
1794 m.setnode(n)
1792 m.setnode(n)
1795 return n
1793 return n
1796
1794
1797 def __len__(self):
1795 def __len__(self):
1798 return len(self._revlog)
1796 return len(self._revlog)
1799
1797
1800 def __iter__(self):
1798 def __iter__(self):
1801 return self._revlog.__iter__()
1799 return self._revlog.__iter__()
1802
1800
1803 def rev(self, node):
1801 def rev(self, node):
1804 return self._revlog.rev(node)
1802 return self._revlog.rev(node)
1805
1803
1806 def node(self, rev):
1804 def node(self, rev):
1807 return self._revlog.node(rev)
1805 return self._revlog.node(rev)
1808
1806
1809 def lookup(self, value):
1807 def lookup(self, value):
1810 return self._revlog.lookup(value)
1808 return self._revlog.lookup(value)
1811
1809
1812 def parentrevs(self, rev):
1810 def parentrevs(self, rev):
1813 return self._revlog.parentrevs(rev)
1811 return self._revlog.parentrevs(rev)
1814
1812
1815 def parents(self, node):
1813 def parents(self, node):
1816 return self._revlog.parents(node)
1814 return self._revlog.parents(node)
1817
1815
1818 def linkrev(self, rev):
1816 def linkrev(self, rev):
1819 return self._revlog.linkrev(rev)
1817 return self._revlog.linkrev(rev)
1820
1818
1821 def checksize(self):
1819 def checksize(self):
1822 return self._revlog.checksize()
1820 return self._revlog.checksize()
1823
1821
1824 def revision(self, node, _df=None, raw=False):
1822 def revision(self, node, _df=None, raw=False):
1825 return self._revlog.revision(node, _df=_df, raw=raw)
1823 return self._revlog.revision(node, _df=_df, raw=raw)
1826
1824
1827 def rawdata(self, node, _df=None):
1825 def rawdata(self, node, _df=None):
1828 return self._revlog.rawdata(node, _df=_df)
1826 return self._revlog.rawdata(node, _df=_df)
1829
1827
1830 def revdiff(self, rev1, rev2):
1828 def revdiff(self, rev1, rev2):
1831 return self._revlog.revdiff(rev1, rev2)
1829 return self._revlog.revdiff(rev1, rev2)
1832
1830
1833 def cmp(self, node, text):
1831 def cmp(self, node, text):
1834 return self._revlog.cmp(node, text)
1832 return self._revlog.cmp(node, text)
1835
1833
1836 def deltaparent(self, rev):
1834 def deltaparent(self, rev):
1837 return self._revlog.deltaparent(rev)
1835 return self._revlog.deltaparent(rev)
1838
1836
1839 def emitrevisions(
1837 def emitrevisions(
1840 self,
1838 self,
1841 nodes,
1839 nodes,
1842 nodesorder=None,
1840 nodesorder=None,
1843 revisiondata=False,
1841 revisiondata=False,
1844 assumehaveparentrevisions=False,
1842 assumehaveparentrevisions=False,
1845 deltamode=repository.CG_DELTAMODE_STD,
1843 deltamode=repository.CG_DELTAMODE_STD,
1846 sidedata_helpers=None,
1844 sidedata_helpers=None,
1847 ):
1845 ):
1848 return self._revlog.emitrevisions(
1846 return self._revlog.emitrevisions(
1849 nodes,
1847 nodes,
1850 nodesorder=nodesorder,
1848 nodesorder=nodesorder,
1851 revisiondata=revisiondata,
1849 revisiondata=revisiondata,
1852 assumehaveparentrevisions=assumehaveparentrevisions,
1850 assumehaveparentrevisions=assumehaveparentrevisions,
1853 deltamode=deltamode,
1851 deltamode=deltamode,
1854 sidedata_helpers=sidedata_helpers,
1852 sidedata_helpers=sidedata_helpers,
1855 )
1853 )
1856
1854
1857 def addgroup(
1855 def addgroup(
1858 self,
1856 self,
1859 deltas,
1857 deltas,
1860 linkmapper,
1858 linkmapper,
1861 transaction,
1859 transaction,
1862 alwayscache=False,
1860 alwayscache=False,
1863 addrevisioncb=None,
1861 addrevisioncb=None,
1864 duplicaterevisioncb=None,
1862 duplicaterevisioncb=None,
1865 ):
1863 ):
1866 return self._revlog.addgroup(
1864 return self._revlog.addgroup(
1867 deltas,
1865 deltas,
1868 linkmapper,
1866 linkmapper,
1869 transaction,
1867 transaction,
1870 alwayscache=alwayscache,
1868 alwayscache=alwayscache,
1871 addrevisioncb=addrevisioncb,
1869 addrevisioncb=addrevisioncb,
1872 duplicaterevisioncb=duplicaterevisioncb,
1870 duplicaterevisioncb=duplicaterevisioncb,
1873 )
1871 )
1874
1872
1875 def rawsize(self, rev):
1873 def rawsize(self, rev):
1876 return self._revlog.rawsize(rev)
1874 return self._revlog.rawsize(rev)
1877
1875
1878 def getstrippoint(self, minlink):
1876 def getstrippoint(self, minlink):
1879 return self._revlog.getstrippoint(minlink)
1877 return self._revlog.getstrippoint(minlink)
1880
1878
1881 def strip(self, minlink, transaction):
1879 def strip(self, minlink, transaction):
1882 return self._revlog.strip(minlink, transaction)
1880 return self._revlog.strip(minlink, transaction)
1883
1881
1884 def files(self):
1882 def files(self):
1885 return self._revlog.files()
1883 return self._revlog.files()
1886
1884
1887 def clone(self, tr, destrevlog, **kwargs):
1885 def clone(self, tr, destrevlog, **kwargs):
1888 if not isinstance(destrevlog, manifestrevlog):
1886 if not isinstance(destrevlog, manifestrevlog):
1889 raise error.ProgrammingError(b'expected manifestrevlog to clone()')
1887 raise error.ProgrammingError(b'expected manifestrevlog to clone()')
1890
1888
1891 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1889 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1892
1890
1893 def storageinfo(
1891 def storageinfo(
1894 self,
1892 self,
1895 exclusivefiles=False,
1893 exclusivefiles=False,
1896 sharedfiles=False,
1894 sharedfiles=False,
1897 revisionscount=False,
1895 revisionscount=False,
1898 trackedsize=False,
1896 trackedsize=False,
1899 storedsize=False,
1897 storedsize=False,
1900 ):
1898 ):
1901 return self._revlog.storageinfo(
1899 return self._revlog.storageinfo(
1902 exclusivefiles=exclusivefiles,
1900 exclusivefiles=exclusivefiles,
1903 sharedfiles=sharedfiles,
1901 sharedfiles=sharedfiles,
1904 revisionscount=revisionscount,
1902 revisionscount=revisionscount,
1905 trackedsize=trackedsize,
1903 trackedsize=trackedsize,
1906 storedsize=storedsize,
1904 storedsize=storedsize,
1907 )
1905 )
1908
1906
1909 @property
1907 @property
1910 def opener(self):
1908 def opener(self):
1911 return self._revlog.opener
1909 return self._revlog.opener
1912
1910
1913 @opener.setter
1911 @opener.setter
1914 def opener(self, value):
1912 def opener(self, value):
1915 self._revlog.opener = value
1913 self._revlog.opener = value
1916
1914
1917
1915
1918 @interfaceutil.implementer(repository.imanifestlog)
1916 @interfaceutil.implementer(repository.imanifestlog)
1919 class manifestlog(object):
1917 class manifestlog(object):
1920 """A collection class representing the collection of manifest snapshots
1918 """A collection class representing the collection of manifest snapshots
1921 referenced by commits in the repository.
1919 referenced by commits in the repository.
1922
1920
1923 In this situation, 'manifest' refers to the abstract concept of a snapshot
1921 In this situation, 'manifest' refers to the abstract concept of a snapshot
1924 of the list of files in the given commit. Consumers of the output of this
1922 of the list of files in the given commit. Consumers of the output of this
1925 class do not care about the implementation details of the actual manifests
1923 class do not care about the implementation details of the actual manifests
1926 they receive (i.e. tree or flat or lazily loaded, etc)."""
1924 they receive (i.e. tree or flat or lazily loaded, etc)."""
1927
1925
1928 def __init__(self, opener, repo, rootstore, narrowmatch):
1926 def __init__(self, opener, repo, rootstore, narrowmatch):
1929 self.nodeconstants = repo.nodeconstants
1927 self.nodeconstants = repo.nodeconstants
1930 usetreemanifest = False
1928 usetreemanifest = False
1931 cachesize = 4
1929 cachesize = 4
1932
1930
1933 opts = getattr(opener, 'options', None)
1931 opts = getattr(opener, 'options', None)
1934 if opts is not None:
1932 if opts is not None:
1935 usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
1933 usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
1936 cachesize = opts.get(b'manifestcachesize', cachesize)
1934 cachesize = opts.get(b'manifestcachesize', cachesize)
1937
1935
1938 self._treemanifests = usetreemanifest
1936 self._treemanifests = usetreemanifest
1939
1937
1940 self._rootstore = rootstore
1938 self._rootstore = rootstore
1941 self._rootstore._setupmanifestcachehooks(repo)
1939 self._rootstore._setupmanifestcachehooks(repo)
1942 self._narrowmatch = narrowmatch
1940 self._narrowmatch = narrowmatch
1943
1941
1944 # A cache of the manifestctx or treemanifestctx for each directory
1942 # A cache of the manifestctx or treemanifestctx for each directory
1945 self._dirmancache = {}
1943 self._dirmancache = {}
1946 self._dirmancache[b''] = util.lrucachedict(cachesize)
1944 self._dirmancache[b''] = util.lrucachedict(cachesize)
1947
1945
1948 self._cachesize = cachesize
1946 self._cachesize = cachesize
1949
1947
1950 def __getitem__(self, node):
1948 def __getitem__(self, node):
1951 """Retrieves the manifest instance for the given node. Throws a
1949 """Retrieves the manifest instance for the given node. Throws a
1952 LookupError if not found.
1950 LookupError if not found.
1953 """
1951 """
1954 return self.get(b'', node)
1952 return self.get(b'', node)
1955
1953
1956 def get(self, tree, node, verify=True):
1954 def get(self, tree, node, verify=True):
1957 """Retrieves the manifest instance for the given node. Throws a
1955 """Retrieves the manifest instance for the given node. Throws a
1958 LookupError if not found.
1956 LookupError if not found.
1959
1957
1960 `verify` - if True an exception will be thrown if the node is not in
1958 `verify` - if True an exception will be thrown if the node is not in
1961 the revlog
1959 the revlog
1962 """
1960 """
1963 if node in self._dirmancache.get(tree, ()):
1961 if node in self._dirmancache.get(tree, ()):
1964 return self._dirmancache[tree][node]
1962 return self._dirmancache[tree][node]
1965
1963
1966 if not self._narrowmatch.always():
1964 if not self._narrowmatch.always():
1967 if not self._narrowmatch.visitdir(tree[:-1]):
1965 if not self._narrowmatch.visitdir(tree[:-1]):
1968 return excludeddirmanifestctx(self.nodeconstants, tree, node)
1966 return excludeddirmanifestctx(self.nodeconstants, tree, node)
1969 if tree:
1967 if tree:
1970 if self._rootstore._treeondisk:
1968 if self._rootstore._treeondisk:
1971 if verify:
1969 if verify:
1972 # Side-effect is LookupError is raised if node doesn't
1970 # Side-effect is LookupError is raised if node doesn't
1973 # exist.
1971 # exist.
1974 self.getstorage(tree).rev(node)
1972 self.getstorage(tree).rev(node)
1975
1973
1976 m = treemanifestctx(self, tree, node)
1974 m = treemanifestctx(self, tree, node)
1977 else:
1975 else:
1978 raise error.Abort(
1976 raise error.Abort(
1979 _(
1977 _(
1980 b"cannot ask for manifest directory '%s' in a flat "
1978 b"cannot ask for manifest directory '%s' in a flat "
1981 b"manifest"
1979 b"manifest"
1982 )
1980 )
1983 % tree
1981 % tree
1984 )
1982 )
1985 else:
1983 else:
1986 if verify:
1984 if verify:
1987 # Side-effect is LookupError is raised if node doesn't exist.
1985 # Side-effect is LookupError is raised if node doesn't exist.
1988 self._rootstore.rev(node)
1986 self._rootstore.rev(node)
1989
1987
1990 if self._treemanifests:
1988 if self._treemanifests:
1991 m = treemanifestctx(self, b'', node)
1989 m = treemanifestctx(self, b'', node)
1992 else:
1990 else:
1993 m = manifestctx(self, node)
1991 m = manifestctx(self, node)
1994
1992
1995 if node != self.nodeconstants.nullid:
1993 if node != self.nodeconstants.nullid:
1996 mancache = self._dirmancache.get(tree)
1994 mancache = self._dirmancache.get(tree)
1997 if not mancache:
1995 if not mancache:
1998 mancache = util.lrucachedict(self._cachesize)
1996 mancache = util.lrucachedict(self._cachesize)
1999 self._dirmancache[tree] = mancache
1997 self._dirmancache[tree] = mancache
2000 mancache[node] = m
1998 mancache[node] = m
2001 return m
1999 return m
2002
2000
2003 def getstorage(self, tree):
2001 def getstorage(self, tree):
2004 return self._rootstore.dirlog(tree)
2002 return self._rootstore.dirlog(tree)
2005
2003
2006 def clearcaches(self, clear_persisted_data=False):
2004 def clearcaches(self, clear_persisted_data=False):
2007 self._dirmancache.clear()
2005 self._dirmancache.clear()
2008 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
2006 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
2009
2007
2010 def rev(self, node):
2008 def rev(self, node):
2011 return self._rootstore.rev(node)
2009 return self._rootstore.rev(node)
2012
2010
2013 def update_caches(self, transaction):
2011 def update_caches(self, transaction):
2014 return self._rootstore._revlog.update_caches(transaction=transaction)
2012 return self._rootstore._revlog.update_caches(transaction=transaction)
2015
2013
2016
2014
2017 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2015 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2018 class memmanifestctx(object):
2016 class memmanifestctx(object):
2019 def __init__(self, manifestlog):
2017 def __init__(self, manifestlog):
2020 self._manifestlog = manifestlog
2018 self._manifestlog = manifestlog
2021 self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
2019 self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
2022
2020
2023 def _storage(self):
2021 def _storage(self):
2024 return self._manifestlog.getstorage(b'')
2022 return self._manifestlog.getstorage(b'')
2025
2023
2026 def copy(self):
2024 def copy(self):
2027 memmf = memmanifestctx(self._manifestlog)
2025 memmf = memmanifestctx(self._manifestlog)
2028 memmf._manifestdict = self.read().copy()
2026 memmf._manifestdict = self.read().copy()
2029 return memmf
2027 return memmf
2030
2028
2031 def read(self):
2029 def read(self):
2032 return self._manifestdict
2030 return self._manifestdict
2033
2031
2034 def write(self, transaction, link, p1, p2, added, removed, match=None):
2032 def write(self, transaction, link, p1, p2, added, removed, match=None):
2035 return self._storage().add(
2033 return self._storage().add(
2036 self._manifestdict,
2034 self._manifestdict,
2037 transaction,
2035 transaction,
2038 link,
2036 link,
2039 p1,
2037 p1,
2040 p2,
2038 p2,
2041 added,
2039 added,
2042 removed,
2040 removed,
2043 match=match,
2041 match=match,
2044 )
2042 )
2045
2043
2046
2044
2047 @interfaceutil.implementer(repository.imanifestrevisionstored)
2045 @interfaceutil.implementer(repository.imanifestrevisionstored)
2048 class manifestctx(object):
2046 class manifestctx(object):
2049 """A class representing a single revision of a manifest, including its
2047 """A class representing a single revision of a manifest, including its
2050 contents, its parent revs, and its linkrev.
2048 contents, its parent revs, and its linkrev.
2051 """
2049 """
2052
2050
2053 def __init__(self, manifestlog, node):
2051 def __init__(self, manifestlog, node):
2054 self._manifestlog = manifestlog
2052 self._manifestlog = manifestlog
2055 self._data = None
2053 self._data = None
2056
2054
2057 self._node = node
2055 self._node = node
2058
2056
2059 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
2057 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
2060 # but let's add it later when something needs it and we can load it
2058 # but let's add it later when something needs it and we can load it
2061 # lazily.
2059 # lazily.
2062 # self.p1, self.p2 = store.parents(node)
2060 # self.p1, self.p2 = store.parents(node)
2063 # rev = store.rev(node)
2061 # rev = store.rev(node)
2064 # self.linkrev = store.linkrev(rev)
2062 # self.linkrev = store.linkrev(rev)
2065
2063
2066 def _storage(self):
2064 def _storage(self):
2067 return self._manifestlog.getstorage(b'')
2065 return self._manifestlog.getstorage(b'')
2068
2066
2069 def node(self):
2067 def node(self):
2070 return self._node
2068 return self._node
2071
2069
2072 def copy(self):
2070 def copy(self):
2073 memmf = memmanifestctx(self._manifestlog)
2071 memmf = memmanifestctx(self._manifestlog)
2074 memmf._manifestdict = self.read().copy()
2072 memmf._manifestdict = self.read().copy()
2075 return memmf
2073 return memmf
2076
2074
2077 @propertycache
2075 @propertycache
2078 def parents(self):
2076 def parents(self):
2079 return self._storage().parents(self._node)
2077 return self._storage().parents(self._node)
2080
2078
2081 def read(self):
2079 def read(self):
2082 if self._data is None:
2080 if self._data is None:
2083 nc = self._manifestlog.nodeconstants
2081 nc = self._manifestlog.nodeconstants
2084 if self._node == nc.nullid:
2082 if self._node == nc.nullid:
2085 self._data = manifestdict(nc.nodelen)
2083 self._data = manifestdict(nc.nodelen)
2086 else:
2084 else:
2087 store = self._storage()
2085 store = self._storage()
2088 if self._node in store.fulltextcache:
2086 if self._node in store.fulltextcache:
2089 text = pycompat.bytestr(store.fulltextcache[self._node])
2087 text = pycompat.bytestr(store.fulltextcache[self._node])
2090 else:
2088 else:
2091 text = store.revision(self._node)
2089 text = store.revision(self._node)
2092 arraytext = bytearray(text)
2090 arraytext = bytearray(text)
2093 store.fulltextcache[self._node] = arraytext
2091 store.fulltextcache[self._node] = arraytext
2094 self._data = manifestdict(nc.nodelen, text)
2092 self._data = manifestdict(nc.nodelen, text)
2095 return self._data
2093 return self._data
2096
2094
2097 def readfast(self, shallow=False):
2095 def readfast(self, shallow=False):
2098 """Calls either readdelta or read, based on which would be less work.
2096 """Calls either readdelta or read, based on which would be less work.
2099 readdelta is called if the delta is against the p1, and therefore can be
2097 readdelta is called if the delta is against the p1, and therefore can be
2100 read quickly.
2098 read quickly.
2101
2099
2102 If `shallow` is True, nothing changes since this is a flat manifest.
2100 If `shallow` is True, nothing changes since this is a flat manifest.
2103 """
2101 """
2104 store = self._storage()
2102 store = self._storage()
2105 r = store.rev(self._node)
2103 r = store.rev(self._node)
2106 deltaparent = store.deltaparent(r)
2104 deltaparent = store.deltaparent(r)
2107 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2105 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2108 return self.readdelta()
2106 return self.readdelta()
2109 return self.read()
2107 return self.read()
2110
2108
2111 def readdelta(self, shallow=False):
2109 def readdelta(self, shallow=False):
2112 """Returns a manifest containing just the entries that are present
2110 """Returns a manifest containing just the entries that are present
2113 in this manifest, but not in its p1 manifest. This is efficient to read
2111 in this manifest, but not in its p1 manifest. This is efficient to read
2114 if the revlog delta is already p1.
2112 if the revlog delta is already p1.
2115
2113
2116 Changing the value of `shallow` has no effect on flat manifests.
2114 Changing the value of `shallow` has no effect on flat manifests.
2117 """
2115 """
2118 store = self._storage()
2116 store = self._storage()
2119 r = store.rev(self._node)
2117 r = store.rev(self._node)
2120 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2118 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2121 return manifestdict(store.nodeconstants.nodelen, d)
2119 return manifestdict(store.nodeconstants.nodelen, d)
2122
2120
2123 def find(self, key):
2121 def find(self, key):
2124 return self.read().find(key)
2122 return self.read().find(key)
2125
2123
2126
2124
2127 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2125 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2128 class memtreemanifestctx(object):
2126 class memtreemanifestctx(object):
2129 def __init__(self, manifestlog, dir=b''):
2127 def __init__(self, manifestlog, dir=b''):
2130 self._manifestlog = manifestlog
2128 self._manifestlog = manifestlog
2131 self._dir = dir
2129 self._dir = dir
2132 self._treemanifest = treemanifest(manifestlog.nodeconstants)
2130 self._treemanifest = treemanifest(manifestlog.nodeconstants)
2133
2131
2134 def _storage(self):
2132 def _storage(self):
2135 return self._manifestlog.getstorage(b'')
2133 return self._manifestlog.getstorage(b'')
2136
2134
2137 def copy(self):
2135 def copy(self):
2138 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2136 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2139 memmf._treemanifest = self._treemanifest.copy()
2137 memmf._treemanifest = self._treemanifest.copy()
2140 return memmf
2138 return memmf
2141
2139
2142 def read(self):
2140 def read(self):
2143 return self._treemanifest
2141 return self._treemanifest
2144
2142
2145 def write(self, transaction, link, p1, p2, added, removed, match=None):
2143 def write(self, transaction, link, p1, p2, added, removed, match=None):
2146 def readtree(dir, node):
2144 def readtree(dir, node):
2147 return self._manifestlog.get(dir, node).read()
2145 return self._manifestlog.get(dir, node).read()
2148
2146
2149 return self._storage().add(
2147 return self._storage().add(
2150 self._treemanifest,
2148 self._treemanifest,
2151 transaction,
2149 transaction,
2152 link,
2150 link,
2153 p1,
2151 p1,
2154 p2,
2152 p2,
2155 added,
2153 added,
2156 removed,
2154 removed,
2157 readtree=readtree,
2155 readtree=readtree,
2158 match=match,
2156 match=match,
2159 )
2157 )
2160
2158
2161
2159
2162 @interfaceutil.implementer(repository.imanifestrevisionstored)
2160 @interfaceutil.implementer(repository.imanifestrevisionstored)
2163 class treemanifestctx(object):
2161 class treemanifestctx(object):
2164 def __init__(self, manifestlog, dir, node):
2162 def __init__(self, manifestlog, dir, node):
2165 self._manifestlog = manifestlog
2163 self._manifestlog = manifestlog
2166 self._dir = dir
2164 self._dir = dir
2167 self._data = None
2165 self._data = None
2168
2166
2169 self._node = node
2167 self._node = node
2170
2168
2171 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
2169 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
2172 # we can instantiate treemanifestctx objects for directories we don't
2170 # we can instantiate treemanifestctx objects for directories we don't
2173 # have on disk.
2171 # have on disk.
2174 # self.p1, self.p2 = store.parents(node)
2172 # self.p1, self.p2 = store.parents(node)
2175 # rev = store.rev(node)
2173 # rev = store.rev(node)
2176 # self.linkrev = store.linkrev(rev)
2174 # self.linkrev = store.linkrev(rev)
2177
2175
2178 def _storage(self):
2176 def _storage(self):
2179 narrowmatch = self._manifestlog._narrowmatch
2177 narrowmatch = self._manifestlog._narrowmatch
2180 if not narrowmatch.always():
2178 if not narrowmatch.always():
2181 if not narrowmatch.visitdir(self._dir[:-1]):
2179 if not narrowmatch.visitdir(self._dir[:-1]):
2182 return excludedmanifestrevlog(
2180 return excludedmanifestrevlog(
2183 self._manifestlog.nodeconstants, self._dir
2181 self._manifestlog.nodeconstants, self._dir
2184 )
2182 )
2185 return self._manifestlog.getstorage(self._dir)
2183 return self._manifestlog.getstorage(self._dir)
2186
2184
2187 def read(self):
2185 def read(self):
2188 if self._data is None:
2186 if self._data is None:
2189 store = self._storage()
2187 store = self._storage()
2190 if self._node == self._manifestlog.nodeconstants.nullid:
2188 if self._node == self._manifestlog.nodeconstants.nullid:
2191 self._data = treemanifest(self._manifestlog.nodeconstants)
2189 self._data = treemanifest(self._manifestlog.nodeconstants)
2192 # TODO accessing non-public API
2190 # TODO accessing non-public API
2193 elif store._treeondisk:
2191 elif store._treeondisk:
2194 m = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
2192 m = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
2195
2193
2196 def gettext():
2194 def gettext():
2197 return store.revision(self._node)
2195 return store.revision(self._node)
2198
2196
2199 def readsubtree(dir, subm):
2197 def readsubtree(dir, subm):
2200 # Set verify to False since we need to be able to create
2198 # Set verify to False since we need to be able to create
2201 # subtrees for trees that don't exist on disk.
2199 # subtrees for trees that don't exist on disk.
2202 return self._manifestlog.get(dir, subm, verify=False).read()
2200 return self._manifestlog.get(dir, subm, verify=False).read()
2203
2201
2204 m.read(gettext, readsubtree)
2202 m.read(gettext, readsubtree)
2205 m.setnode(self._node)
2203 m.setnode(self._node)
2206 self._data = m
2204 self._data = m
2207 else:
2205 else:
2208 if self._node in store.fulltextcache:
2206 if self._node in store.fulltextcache:
2209 text = pycompat.bytestr(store.fulltextcache[self._node])
2207 text = pycompat.bytestr(store.fulltextcache[self._node])
2210 else:
2208 else:
2211 text = store.revision(self._node)
2209 text = store.revision(self._node)
2212 arraytext = bytearray(text)
2210 arraytext = bytearray(text)
2213 store.fulltextcache[self._node] = arraytext
2211 store.fulltextcache[self._node] = arraytext
2214 self._data = treemanifest(
2212 self._data = treemanifest(
2215 self._manifestlog.nodeconstants, dir=self._dir, text=text
2213 self._manifestlog.nodeconstants, dir=self._dir, text=text
2216 )
2214 )
2217
2215
2218 return self._data
2216 return self._data
2219
2217
2220 def node(self):
2218 def node(self):
2221 return self._node
2219 return self._node
2222
2220
2223 def copy(self):
2221 def copy(self):
2224 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2222 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2225 memmf._treemanifest = self.read().copy()
2223 memmf._treemanifest = self.read().copy()
2226 return memmf
2224 return memmf
2227
2225
2228 @propertycache
2226 @propertycache
2229 def parents(self):
2227 def parents(self):
2230 return self._storage().parents(self._node)
2228 return self._storage().parents(self._node)
2231
2229
2232 def readdelta(self, shallow=False):
2230 def readdelta(self, shallow=False):
2233 """Returns a manifest containing just the entries that are present
2231 """Returns a manifest containing just the entries that are present
2234 in this manifest, but not in its p1 manifest. This is efficient to read
2232 in this manifest, but not in its p1 manifest. This is efficient to read
2235 if the revlog delta is already p1.
2233 if the revlog delta is already p1.
2236
2234
2237 If `shallow` is True, this will read the delta for this directory,
2235 If `shallow` is True, this will read the delta for this directory,
2238 without recursively reading subdirectory manifests. Instead, any
2236 without recursively reading subdirectory manifests. Instead, any
2239 subdirectory entry will be reported as it appears in the manifest, i.e.
2237 subdirectory entry will be reported as it appears in the manifest, i.e.
2240 the subdirectory will be reported among files and distinguished only by
2238 the subdirectory will be reported among files and distinguished only by
2241 its 't' flag.
2239 its 't' flag.
2242 """
2240 """
2243 store = self._storage()
2241 store = self._storage()
2244 if shallow:
2242 if shallow:
2245 r = store.rev(self._node)
2243 r = store.rev(self._node)
2246 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2244 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2247 return manifestdict(store.nodeconstants.nodelen, d)
2245 return manifestdict(store.nodeconstants.nodelen, d)
2248 else:
2246 else:
2249 # Need to perform a slow delta
2247 # Need to perform a slow delta
2250 r0 = store.deltaparent(store.rev(self._node))
2248 r0 = store.deltaparent(store.rev(self._node))
2251 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
2249 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
2252 m1 = self.read()
2250 m1 = self.read()
2253 md = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
2251 md = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
2254 for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
2252 for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
2255 if n1:
2253 if n1:
2256 md[f] = n1
2254 md[f] = n1
2257 if fl1:
2255 if fl1:
2258 md.setflag(f, fl1)
2256 md.setflag(f, fl1)
2259 return md
2257 return md
2260
2258
2261 def readfast(self, shallow=False):
2259 def readfast(self, shallow=False):
2262 """Calls either readdelta or read, based on which would be less work.
2260 """Calls either readdelta or read, based on which would be less work.
2263 readdelta is called if the delta is against the p1, and therefore can be
2261 readdelta is called if the delta is against the p1, and therefore can be
2264 read quickly.
2262 read quickly.
2265
2263
2266 If `shallow` is True, it only returns the entries from this manifest,
2264 If `shallow` is True, it only returns the entries from this manifest,
2267 and not any submanifests.
2265 and not any submanifests.
2268 """
2266 """
2269 store = self._storage()
2267 store = self._storage()
2270 r = store.rev(self._node)
2268 r = store.rev(self._node)
2271 deltaparent = store.deltaparent(r)
2269 deltaparent = store.deltaparent(r)
2272 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2270 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2273 return self.readdelta(shallow=shallow)
2271 return self.readdelta(shallow=shallow)
2274
2272
2275 if shallow:
2273 if shallow:
2276 return manifestdict(
2274 return manifestdict(
2277 store.nodeconstants.nodelen, store.revision(self._node)
2275 store.nodeconstants.nodelen, store.revision(self._node)
2278 )
2276 )
2279 else:
2277 else:
2280 return self.read()
2278 return self.read()
2281
2279
2282 def find(self, key):
2280 def find(self, key):
2283 return self.read().find(key)
2281 return self.read().find(key)
2284
2282
2285
2283
2286 class excludeddir(treemanifest):
2284 class excludeddir(treemanifest):
2287 """Stand-in for a directory that is excluded from the repository.
2285 """Stand-in for a directory that is excluded from the repository.
2288
2286
2289 With narrowing active on a repository that uses treemanifests,
2287 With narrowing active on a repository that uses treemanifests,
2290 some of the directory revlogs will be excluded from the resulting
2288 some of the directory revlogs will be excluded from the resulting
2291 clone. This is a huge storage win for clients, but means we need
2289 clone. This is a huge storage win for clients, but means we need
2292 some sort of pseudo-manifest to surface to internals so we can
2290 some sort of pseudo-manifest to surface to internals so we can
2293 detect a merge conflict outside the narrowspec. That's what this
2291 detect a merge conflict outside the narrowspec. That's what this
2294 class is: it stands in for a directory whose node is known, but
2292 class is: it stands in for a directory whose node is known, but
2295 whose contents are unknown.
2293 whose contents are unknown.
2296 """
2294 """
2297
2295
2298 def __init__(self, nodeconstants, dir, node):
2296 def __init__(self, nodeconstants, dir, node):
2299 super(excludeddir, self).__init__(nodeconstants, dir)
2297 super(excludeddir, self).__init__(nodeconstants, dir)
2300 self._node = node
2298 self._node = node
2301 # Add an empty file, which will be included by iterators and such,
2299 # Add an empty file, which will be included by iterators and such,
2302 # appearing as the directory itself (i.e. something like "dir/")
2300 # appearing as the directory itself (i.e. something like "dir/")
2303 self._files[b''] = node
2301 self._files[b''] = node
2304 self._flags[b''] = b't'
2302 self._flags[b''] = b't'
2305
2303
2306 # Manifests outside the narrowspec should never be modified, so avoid
2304 # Manifests outside the narrowspec should never be modified, so avoid
2307 # copying. This makes a noticeable difference when there are very many
2305 # copying. This makes a noticeable difference when there are very many
2308 # directories outside the narrowspec. Also, it makes sense for the copy to
2306 # directories outside the narrowspec. Also, it makes sense for the copy to
2309 # be of the same type as the original, which would not happen with the
2307 # be of the same type as the original, which would not happen with the
2310 # super type's copy().
2308 # super type's copy().
2311 def copy(self):
2309 def copy(self):
2312 return self
2310 return self
2313
2311
2314
2312
2315 class excludeddirmanifestctx(treemanifestctx):
2313 class excludeddirmanifestctx(treemanifestctx):
2316 """context wrapper for excludeddir - see that docstring for rationale"""
2314 """context wrapper for excludeddir - see that docstring for rationale"""
2317
2315
2318 def __init__(self, nodeconstants, dir, node):
2316 def __init__(self, nodeconstants, dir, node):
2319 self.nodeconstants = nodeconstants
2317 self.nodeconstants = nodeconstants
2320 self._dir = dir
2318 self._dir = dir
2321 self._node = node
2319 self._node = node
2322
2320
2323 def read(self):
2321 def read(self):
2324 return excludeddir(self.nodeconstants, self._dir, self._node)
2322 return excludeddir(self.nodeconstants, self._dir, self._node)
2325
2323
2326 def readfast(self, shallow=False):
2324 def readfast(self, shallow=False):
2327 # special version of readfast since we don't have underlying storage
2325 # special version of readfast since we don't have underlying storage
2328 return self.read()
2326 return self.read()
2329
2327
2330 def write(self, *args):
2328 def write(self, *args):
2331 raise error.ProgrammingError(
2329 raise error.ProgrammingError(
2332 b'attempt to write manifest from excluded dir %s' % self._dir
2330 b'attempt to write manifest from excluded dir %s' % self._dir
2333 )
2331 )
2334
2332
2335
2333
2336 class excludedmanifestrevlog(manifestrevlog):
2334 class excludedmanifestrevlog(manifestrevlog):
2337 """Stand-in for excluded treemanifest revlogs.
2335 """Stand-in for excluded treemanifest revlogs.
2338
2336
2339 When narrowing is active on a treemanifest repository, we'll have
2337 When narrowing is active on a treemanifest repository, we'll have
2340 references to directories we can't see due to the revlog being
2338 references to directories we can't see due to the revlog being
2341 skipped. This class exists to conform to the manifestrevlog
2339 skipped. This class exists to conform to the manifestrevlog
2342 interface for those directories and proactively prevent writes to
2340 interface for those directories and proactively prevent writes to
2343 outside the narrowspec.
2341 outside the narrowspec.
2344 """
2342 """
2345
2343
2346 def __init__(self, nodeconstants, dir):
2344 def __init__(self, nodeconstants, dir):
2347 self.nodeconstants = nodeconstants
2345 self.nodeconstants = nodeconstants
2348 self._dir = dir
2346 self._dir = dir
2349
2347
2350 def __len__(self):
2348 def __len__(self):
2351 raise error.ProgrammingError(
2349 raise error.ProgrammingError(
2352 b'attempt to get length of excluded dir %s' % self._dir
2350 b'attempt to get length of excluded dir %s' % self._dir
2353 )
2351 )
2354
2352
2355 def rev(self, node):
2353 def rev(self, node):
2356 raise error.ProgrammingError(
2354 raise error.ProgrammingError(
2357 b'attempt to get rev from excluded dir %s' % self._dir
2355 b'attempt to get rev from excluded dir %s' % self._dir
2358 )
2356 )
2359
2357
2360 def linkrev(self, node):
2358 def linkrev(self, node):
2361 raise error.ProgrammingError(
2359 raise error.ProgrammingError(
2362 b'attempt to get linkrev from excluded dir %s' % self._dir
2360 b'attempt to get linkrev from excluded dir %s' % self._dir
2363 )
2361 )
2364
2362
2365 def node(self, rev):
2363 def node(self, rev):
2366 raise error.ProgrammingError(
2364 raise error.ProgrammingError(
2367 b'attempt to get node from excluded dir %s' % self._dir
2365 b'attempt to get node from excluded dir %s' % self._dir
2368 )
2366 )
2369
2367
2370 def add(self, *args, **kwargs):
2368 def add(self, *args, **kwargs):
2371 # We should never write entries in dirlogs outside the narrow clone.
2369 # We should never write entries in dirlogs outside the narrow clone.
2372 # However, the method still gets called from writesubtree() in
2370 # However, the method still gets called from writesubtree() in
2373 # _addtree(), so we need to handle it. We should possibly make that
2371 # _addtree(), so we need to handle it. We should possibly make that
2374 # avoid calling add() with a clean manifest (_dirty is always False
2372 # avoid calling add() with a clean manifest (_dirty is always False
2375 # in excludeddir instances).
2373 # in excludeddir instances).
2376 pass
2374 pass
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now