##// END OF EJS Templates
refactor: prefer lookup by revision, even for null...
Joerg Sonnenberger -
r47600:ad878e3f default
parent child Browse files
Show More
@@ -1,3925 +1,3926 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy as copymod
10 import copy as copymod
11 import errno
11 import errno
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 short,
20 short,
20 )
21 )
21 from .pycompat import (
22 from .pycompat import (
22 getattr,
23 getattr,
23 open,
24 open,
24 setattr,
25 setattr,
25 )
26 )
26 from .thirdparty import attr
27 from .thirdparty import attr
27
28
28 from . import (
29 from . import (
29 bookmarks,
30 bookmarks,
30 changelog,
31 changelog,
31 copies,
32 copies,
32 crecord as crecordmod,
33 crecord as crecordmod,
33 dirstateguard,
34 dirstateguard,
34 encoding,
35 encoding,
35 error,
36 error,
36 formatter,
37 formatter,
37 logcmdutil,
38 logcmdutil,
38 match as matchmod,
39 match as matchmod,
39 merge as mergemod,
40 merge as mergemod,
40 mergestate as mergestatemod,
41 mergestate as mergestatemod,
41 mergeutil,
42 mergeutil,
42 obsolete,
43 obsolete,
43 patch,
44 patch,
44 pathutil,
45 pathutil,
45 phases,
46 phases,
46 pycompat,
47 pycompat,
47 repair,
48 repair,
48 revlog,
49 revlog,
49 rewriteutil,
50 rewriteutil,
50 scmutil,
51 scmutil,
51 state as statemod,
52 state as statemod,
52 subrepoutil,
53 subrepoutil,
53 templatekw,
54 templatekw,
54 templater,
55 templater,
55 util,
56 util,
56 vfs as vfsmod,
57 vfs as vfsmod,
57 )
58 )
58
59
59 from .utils import (
60 from .utils import (
60 dateutil,
61 dateutil,
61 stringutil,
62 stringutil,
62 )
63 )
63
64
if pycompat.TYPE_CHECKING:
    # Imported only for static analysis; never executed at runtime.
    from typing import Any, Dict

    # Touch each name so pyflakes does not flag the imports as unused.
    for _t in (Any, Dict):
        assert _t

# Convenience alias used throughout this module for in-memory patch buffers.
stringio = util.stringio
74
75
# templates of common command options

dryrunopts = [
    (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
]

confirmopts = [
    (b'', b'confirm', None, _(b'ask before applying actions')),
]

remoteopts = [
    (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
    (
        b'',
        b'remotecmd',
        b'',
        _(b'specify hg command to run on the remote side'),
        _(b'CMD'),
    ),
    (
        b'',
        b'insecure',
        None,
        _(b'do not verify server certificate (ignoring web.cacerts config)'),
    ),
]

walkopts = [
    (
        b'I',
        b'include',
        [],
        _(b'include names matching the given patterns'),
        _(b'PATTERN'),
    ),
    (
        b'X',
        b'exclude',
        [],
        _(b'exclude names matching the given patterns'),
        _(b'PATTERN'),
    ),
]

commitopts = [
    (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
    (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
]

commitopts2 = [
    (
        b'd',
        b'date',
        b'',
        _(b'record the specified date as commit date'),
        _(b'DATE'),
    ),
    (
        b'u',
        b'user',
        b'',
        _(b'record the specified user as committer'),
        _(b'USER'),
    ),
]

commitopts3 = [
    (b'D', b'currentdate', None, _(b'record the current date as commit date')),
    (b'U', b'currentuser', None, _(b'record the current user as committer')),
]

formatteropts = [
    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
]

templateopts = [
    (
        b'',
        b'style',
        b'',
        _(b'display using template map file (DEPRECATED)'),
        _(b'STYLE'),
    ),
    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
]

logopts = [
    (b'p', b'patch', None, _(b'show patch')),
    (b'g', b'git', None, _(b'use git extended diff format')),
    (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
    (b'M', b'no-merges', None, _(b'do not show merges')),
    (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
    (b'G', b'graph', None, _(b"show the revision DAG")),
] + templateopts

diffopts = [
    (b'a', b'text', None, _(b'treat all files as text')),
    (
        b'g',
        b'git',
        None,
        _(b'use git extended diff format (DEFAULT: diff.git)'),
    ),
    (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
    (b'', b'nodates', None, _(b'omit dates from diff headers')),
]

diffwsopts = [
    (
        b'w',
        b'ignore-all-space',
        None,
        _(b'ignore white space when comparing lines'),
    ),
    (
        b'b',
        b'ignore-space-change',
        None,
        _(b'ignore changes in the amount of white space'),
    ),
    (
        b'B',
        b'ignore-blank-lines',
        None,
        _(b'ignore changes whose lines are all blank'),
    ),
    (
        b'Z',
        b'ignore-space-at-eol',
        None,
        _(b'ignore changes in whitespace at EOL'),
    ),
]

diffopts2 = (
    [
        (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
        (
            b'p',
            b'show-function',
            None,
            _(
                b'show which function each change is in (DEFAULT: diff.showfunc)'
            ),
        ),
        (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
    ]
    + diffwsopts
    + [
        (
            b'U',
            b'unified',
            b'',
            _(b'number of lines of context to show'),
            _(b'NUM'),
        ),
        (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
        (
            b'',
            b'root',
            b'',
            _(b'produce diffs relative to subdirectory'),
            _(b'DIR'),
        ),
    ]
)

mergetoolopts = [
    (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
]

similarityopts = [
    (
        b's',
        b'similarity',
        b'',
        _(b'guess renamed files by similarity (0<=s<=100)'),
        _(b'SIMILARITY'),
    )
]

subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]

debugrevlogopts = [
    (b'c', b'changelog', False, _(b'open changelog')),
    (b'm', b'manifest', False, _(b'open manifest')),
    (b'', b'dir', b'', _(b'open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = b"^HG: ------------------------ >8 ------------------------$"
267
268
268
269
def check_at_most_one_arg(opts, *args):
    """Abort if more than one of the given options is present in opts.

    Returns the unique option that was specified, or None if none of
    them were.
    """

    def _pretty(name):
        # option names use '_' internally but '-' on the command line
        return pycompat.sysbytes(name).replace(b'_', b'-')

    seen = None
    for arg in args:
        if not opts.get(arg):
            continue
        if seen:
            raise error.InputError(
                _(b'cannot specify both --%s and --%s')
                % (_pretty(seen), _pretty(arg))
            )
        seen = arg
    return seen
288
289
289
290
def check_incompatible_arguments(opts, first, others):
    """Abort if `first` is given along with any option in `others`.

    Unlike check_at_most_one_arg(), the options in `others` are not
    mutually exclusive among themselves; they are passed as a single
    collection and each is checked only against `first`.
    """
    for incompatible in others:
        check_at_most_one_arg(opts, first, incompatible)
298
299
299
300
def resolvecommitoptions(ui, opts):
    """Modify the commit options dict to resolve related options.

    Returns True when ``rewrite.update-timestamp`` (rather than an
    explicit user request) is the reason the ``date`` option was set;
    callers may then ignore a date-only change.
    """
    check_at_most_one_arg(opts, b'date', b'currentdate')
    check_at_most_one_arg(opts, b'user', b'currentuser')

    if opts.get(b'currentuser'):
        opts[b'user'] = ui.username()

    # date-only change should be ignored?
    datemaydiffer = False

    if opts.get(b'currentdate'):
        opts[b'date'] = b'%d %d' % dateutil.makedate()
    elif (
        not opts.get(b'date')
        and ui.configbool(b'rewrite', b'update-timestamp')
        and opts.get(b'currentdate') is None
    ):
        # date set implicitly by config, not by the user
        opts[b'date'] = b'%d %d' % dateutil.makedate()
        datemaydiffer = True

    return datemaydiffer
325
326
326
327
def checknotesize(ui, opts):
    """Make sure the note option, if given, is of valid format.

    Raises InputError when the note exceeds 255 bytes or spans more
    than one line; an absent or empty note is accepted silently.
    """
    note = opts.get(b'note')
    if not note:
        return
    if len(note) > 255:
        raise error.InputError(_(b"cannot store a note of more than 255 bytes"))
    if b'\n' in note:
        raise error.InputError(_(b"note cannot contain a newline"))
338
339
339
340
def ishunk(x):
    """Return True if x is a patch hunk (curses or plain record flavor)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
343
344
344
345
def newandmodified(chunks, originalchunks):
    """Collect files that are newly added *and* modified during recording.

    Returns a pair of sets: the filenames of new-file hunks that are not
    in ``originalchunks``, and the companion files (e.g. rename sources)
    that must also be restored for patch application to work.
    """
    newfiles = set()
    alsorestore = set()
    for chunk in chunks:
        if not ishunk(chunk):
            continue
        if not chunk.header.isnewfile():
            continue
        if chunk in originalchunks:
            continue
        fname = chunk.header.filename()
        newfiles.add(fname)
        # every other file mentioned by the header (rename/copy sources)
        alsorestore.update(set(chunk.header.files()) - {fname})
    return newfiles, alsorestore
359
360
360
361
def parsealiases(cmd):
    """Split a b'name|alias|...' command spec into its list of aliases.

    For every alias containing b'-', a dash-free variant is appended to
    the end of the list (unless that variant is already present), so
    e.g. b'debug-foo' also answers to b'debugfoo'.
    """
    aliases = cmd.split(b"|")
    known = set(aliases)
    folded = []
    for alias in aliases:
        if b'-' not in alias:
            continue
        collapsed = alias.replace(b'-', b'')
        if collapsed not in known:
            known.add(collapsed)
            folded.append(collapsed)
    aliases.extend(folded)
    return aliases
373
374
374
375
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original write method so the caller can restore it
    once the diff has been emitted.
    """
    oldwrite = ui.write

    def labeledwrite(*args, **kwargs):
        label = kwargs.pop('label', b'')
        # difflabel yields (chunk, label-suffix) pairs for each diff piece
        for chunk, chunklabel in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + chunklabel)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
389
390
390
391
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Filter patch hunks, preferring the curses UI when enabled.

    Falls back to the plain-text prompt when curses is disabled or the
    curses selector signals (via fallbackerror) that it cannot run.
    """
    try:
        if usecurses:
            selector = (
                crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
                if testfile
                else crecordmod.chunkselector
            )
            return crecordmod.filterpatch(
                ui, originalhunks, selector, operation
            )
    except crecordmod.fallbackerror as e:
        ui.warn(b'%s\n' % e)
        ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
409
410
410
411
def recordfilter(ui, originalhunks, match, operation=None):
    """Prompt the user to filter originalhunks; return the selected hunks.

    *operation* is used to build ui messages indicating to the user what
    kind of filtering they are doing: reverting, committing, shelving,
    etc. (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    # temporarily wrap ui.write so the preview diff is colorized
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(
            ui, originalhunks, usecurses, testfile, match, operation
        )
    finally:
        # always restore the original write method
        ui.write = oldwrite
    return newchunks, newopts
428
429
429
430
430 def dorecord(
431 def dorecord(
431 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
432 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
432 ):
433 ):
433 opts = pycompat.byteskwargs(opts)
434 opts = pycompat.byteskwargs(opts)
434 if not ui.interactive():
435 if not ui.interactive():
435 if cmdsuggest:
436 if cmdsuggest:
436 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
437 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
437 else:
438 else:
438 msg = _(b'running non-interactively')
439 msg = _(b'running non-interactively')
439 raise error.InputError(msg)
440 raise error.InputError(msg)
440
441
441 # make sure username is set before going interactive
442 # make sure username is set before going interactive
442 if not opts.get(b'user'):
443 if not opts.get(b'user'):
443 ui.username() # raise exception, username not provided
444 ui.username() # raise exception, username not provided
444
445
445 def recordfunc(ui, repo, message, match, opts):
446 def recordfunc(ui, repo, message, match, opts):
446 """This is generic record driver.
447 """This is generic record driver.
447
448
448 Its job is to interactively filter local changes, and
449 Its job is to interactively filter local changes, and
449 accordingly prepare working directory into a state in which the
450 accordingly prepare working directory into a state in which the
450 job can be delegated to a non-interactive commit command such as
451 job can be delegated to a non-interactive commit command such as
451 'commit' or 'qrefresh'.
452 'commit' or 'qrefresh'.
452
453
453 After the actual job is done by non-interactive command, the
454 After the actual job is done by non-interactive command, the
454 working directory is restored to its original state.
455 working directory is restored to its original state.
455
456
456 In the end we'll record interesting changes, and everything else
457 In the end we'll record interesting changes, and everything else
457 will be left in place, so the user can continue working.
458 will be left in place, so the user can continue working.
458 """
459 """
459 if not opts.get(b'interactive-unshelve'):
460 if not opts.get(b'interactive-unshelve'):
460 checkunfinished(repo, commit=True)
461 checkunfinished(repo, commit=True)
461 wctx = repo[None]
462 wctx = repo[None]
462 merge = len(wctx.parents()) > 1
463 merge = len(wctx.parents()) > 1
463 if merge:
464 if merge:
464 raise error.InputError(
465 raise error.InputError(
465 _(
466 _(
466 b'cannot partially commit a merge '
467 b'cannot partially commit a merge '
467 b'(use "hg commit" instead)'
468 b'(use "hg commit" instead)'
468 )
469 )
469 )
470 )
470
471
471 def fail(f, msg):
472 def fail(f, msg):
472 raise error.InputError(b'%s: %s' % (f, msg))
473 raise error.InputError(b'%s: %s' % (f, msg))
473
474
474 force = opts.get(b'force')
475 force = opts.get(b'force')
475 if not force:
476 if not force:
476 match = matchmod.badmatch(match, fail)
477 match = matchmod.badmatch(match, fail)
477
478
478 status = repo.status(match=match)
479 status = repo.status(match=match)
479
480
480 overrides = {(b'ui', b'commitsubrepos'): True}
481 overrides = {(b'ui', b'commitsubrepos'): True}
481
482
482 with repo.ui.configoverride(overrides, b'record'):
483 with repo.ui.configoverride(overrides, b'record'):
483 # subrepoutil.precommit() modifies the status
484 # subrepoutil.precommit() modifies the status
484 tmpstatus = scmutil.status(
485 tmpstatus = scmutil.status(
485 copymod.copy(status.modified),
486 copymod.copy(status.modified),
486 copymod.copy(status.added),
487 copymod.copy(status.added),
487 copymod.copy(status.removed),
488 copymod.copy(status.removed),
488 copymod.copy(status.deleted),
489 copymod.copy(status.deleted),
489 copymod.copy(status.unknown),
490 copymod.copy(status.unknown),
490 copymod.copy(status.ignored),
491 copymod.copy(status.ignored),
491 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
492 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
492 )
493 )
493
494
494 # Force allows -X subrepo to skip the subrepo.
495 # Force allows -X subrepo to skip the subrepo.
495 subs, commitsubs, newstate = subrepoutil.precommit(
496 subs, commitsubs, newstate = subrepoutil.precommit(
496 repo.ui, wctx, tmpstatus, match, force=True
497 repo.ui, wctx, tmpstatus, match, force=True
497 )
498 )
498 for s in subs:
499 for s in subs:
499 if s in commitsubs:
500 if s in commitsubs:
500 dirtyreason = wctx.sub(s).dirtyreason(True)
501 dirtyreason = wctx.sub(s).dirtyreason(True)
501 raise error.Abort(dirtyreason)
502 raise error.Abort(dirtyreason)
502
503
503 if not force:
504 if not force:
504 repo.checkcommitpatterns(wctx, match, status, fail)
505 repo.checkcommitpatterns(wctx, match, status, fail)
505 diffopts = patch.difffeatureopts(
506 diffopts = patch.difffeatureopts(
506 ui,
507 ui,
507 opts=opts,
508 opts=opts,
508 whitespace=True,
509 whitespace=True,
509 section=b'commands',
510 section=b'commands',
510 configprefix=b'commit.interactive.',
511 configprefix=b'commit.interactive.',
511 )
512 )
512 diffopts.nodates = True
513 diffopts.nodates = True
513 diffopts.git = True
514 diffopts.git = True
514 diffopts.showfunc = True
515 diffopts.showfunc = True
515 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
516 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
516 originalchunks = patch.parsepatch(originaldiff)
517 originalchunks = patch.parsepatch(originaldiff)
517 match = scmutil.match(repo[None], pats)
518 match = scmutil.match(repo[None], pats)
518
519
519 # 1. filter patch, since we are intending to apply subset of it
520 # 1. filter patch, since we are intending to apply subset of it
520 try:
521 try:
521 chunks, newopts = filterfn(ui, originalchunks, match)
522 chunks, newopts = filterfn(ui, originalchunks, match)
522 except error.PatchError as err:
523 except error.PatchError as err:
523 raise error.InputError(_(b'error parsing patch: %s') % err)
524 raise error.InputError(_(b'error parsing patch: %s') % err)
524 opts.update(newopts)
525 opts.update(newopts)
525
526
526 # We need to keep a backup of files that have been newly added and
527 # We need to keep a backup of files that have been newly added and
527 # modified during the recording process because there is a previous
528 # modified during the recording process because there is a previous
528 # version without the edit in the workdir. We also will need to restore
529 # version without the edit in the workdir. We also will need to restore
529 # files that were the sources of renames so that the patch application
530 # files that were the sources of renames so that the patch application
530 # works.
531 # works.
531 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
532 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
532 chunks, originalchunks
533 chunks, originalchunks
533 )
534 )
534 contenders = set()
535 contenders = set()
535 for h in chunks:
536 for h in chunks:
536 try:
537 try:
537 contenders.update(set(h.files()))
538 contenders.update(set(h.files()))
538 except AttributeError:
539 except AttributeError:
539 pass
540 pass
540
541
541 changed = status.modified + status.added + status.removed
542 changed = status.modified + status.added + status.removed
542 newfiles = [f for f in changed if f in contenders]
543 newfiles = [f for f in changed if f in contenders]
543 if not newfiles:
544 if not newfiles:
544 ui.status(_(b'no changes to record\n'))
545 ui.status(_(b'no changes to record\n'))
545 return 0
546 return 0
546
547
547 modified = set(status.modified)
548 modified = set(status.modified)
548
549
549 # 2. backup changed files, so we can restore them in the end
550 # 2. backup changed files, so we can restore them in the end
550
551
551 if backupall:
552 if backupall:
552 tobackup = changed
553 tobackup = changed
553 else:
554 else:
554 tobackup = [
555 tobackup = [
555 f
556 f
556 for f in newfiles
557 for f in newfiles
557 if f in modified or f in newlyaddedandmodifiedfiles
558 if f in modified or f in newlyaddedandmodifiedfiles
558 ]
559 ]
559 backups = {}
560 backups = {}
560 if tobackup:
561 if tobackup:
561 backupdir = repo.vfs.join(b'record-backups')
562 backupdir = repo.vfs.join(b'record-backups')
562 try:
563 try:
563 os.mkdir(backupdir)
564 os.mkdir(backupdir)
564 except OSError as err:
565 except OSError as err:
565 if err.errno != errno.EEXIST:
566 if err.errno != errno.EEXIST:
566 raise
567 raise
567 try:
568 try:
568 # backup continues
569 # backup continues
569 for f in tobackup:
570 for f in tobackup:
570 fd, tmpname = pycompat.mkstemp(
571 fd, tmpname = pycompat.mkstemp(
571 prefix=os.path.basename(f) + b'.', dir=backupdir
572 prefix=os.path.basename(f) + b'.', dir=backupdir
572 )
573 )
573 os.close(fd)
574 os.close(fd)
574 ui.debug(b'backup %r as %r\n' % (f, tmpname))
575 ui.debug(b'backup %r as %r\n' % (f, tmpname))
575 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
576 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
576 backups[f] = tmpname
577 backups[f] = tmpname
577
578
578 fp = stringio()
579 fp = stringio()
579 for c in chunks:
580 for c in chunks:
580 fname = c.filename()
581 fname = c.filename()
581 if fname in backups:
582 if fname in backups:
582 c.write(fp)
583 c.write(fp)
583 dopatch = fp.tell()
584 dopatch = fp.tell()
584 fp.seek(0)
585 fp.seek(0)
585
586
586 # 2.5 optionally review / modify patch in text editor
587 # 2.5 optionally review / modify patch in text editor
587 if opts.get(b'review', False):
588 if opts.get(b'review', False):
588 patchtext = (
589 patchtext = (
589 crecordmod.diffhelptext
590 crecordmod.diffhelptext
590 + crecordmod.patchhelptext
591 + crecordmod.patchhelptext
591 + fp.read()
592 + fp.read()
592 )
593 )
593 reviewedpatch = ui.edit(
594 reviewedpatch = ui.edit(
594 patchtext, b"", action=b"diff", repopath=repo.path
595 patchtext, b"", action=b"diff", repopath=repo.path
595 )
596 )
596 fp.truncate(0)
597 fp.truncate(0)
597 fp.write(reviewedpatch)
598 fp.write(reviewedpatch)
598 fp.seek(0)
599 fp.seek(0)
599
600
600 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
601 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
601 # 3a. apply filtered patch to clean repo (clean)
602 # 3a. apply filtered patch to clean repo (clean)
602 if backups:
603 if backups:
603 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
604 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
604 mergemod.revert_to(repo[b'.'], matcher=m)
605 mergemod.revert_to(repo[b'.'], matcher=m)
605
606
606 # 3b. (apply)
607 # 3b. (apply)
607 if dopatch:
608 if dopatch:
608 try:
609 try:
609 ui.debug(b'applying patch\n')
610 ui.debug(b'applying patch\n')
610 ui.debug(fp.getvalue())
611 ui.debug(fp.getvalue())
611 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
612 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
612 except error.PatchError as err:
613 except error.PatchError as err:
613 raise error.InputError(pycompat.bytestr(err))
614 raise error.InputError(pycompat.bytestr(err))
614 del fp
615 del fp
615
616
616 # 4. We prepared working directory according to filtered
617 # 4. We prepared working directory according to filtered
617 # patch. Now is the time to delegate the job to
618 # patch. Now is the time to delegate the job to
618 # commit/qrefresh or the like!
619 # commit/qrefresh or the like!
619
620
620 # Make all of the pathnames absolute.
621 # Make all of the pathnames absolute.
621 newfiles = [repo.wjoin(nf) for nf in newfiles]
622 newfiles = [repo.wjoin(nf) for nf in newfiles]
622 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
623 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
623 finally:
624 finally:
624 # 5. finally restore backed-up files
625 # 5. finally restore backed-up files
625 try:
626 try:
626 dirstate = repo.dirstate
627 dirstate = repo.dirstate
627 for realname, tmpname in pycompat.iteritems(backups):
628 for realname, tmpname in pycompat.iteritems(backups):
628 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
629 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
629
630
630 if dirstate[realname] == b'n':
631 if dirstate[realname] == b'n':
631 # without normallookup, restoring timestamp
632 # without normallookup, restoring timestamp
632 # may cause partially committed files
633 # may cause partially committed files
633 # to be treated as unmodified
634 # to be treated as unmodified
634 dirstate.normallookup(realname)
635 dirstate.normallookup(realname)
635
636
636 # copystat=True here and above are a hack to trick any
637 # copystat=True here and above are a hack to trick any
637 # editors that have f open that we haven't modified them.
638 # editors that have f open that we haven't modified them.
638 #
639 #
639 # Also note that this racy as an editor could notice the
640 # Also note that this racy as an editor could notice the
640 # file's mtime before we've finished writing it.
641 # file's mtime before we've finished writing it.
641 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
642 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
642 os.unlink(tmpname)
643 os.unlink(tmpname)
643 if tobackup:
644 if tobackup:
644 os.rmdir(backupdir)
645 os.rmdir(backupdir)
645 except OSError:
646 except OSError:
646 pass
647 pass
647
648
648 def recordinwlock(ui, repo, message, match, opts):
649 def recordinwlock(ui, repo, message, match, opts):
649 with repo.wlock():
650 with repo.wlock():
650 return recordfunc(ui, repo, message, match, opts)
651 return recordfunc(ui, repo, message, match, opts)
651
652
652 return commit(ui, repo, recordinwlock, pats, opts)
653 return commit(ui, repo, recordinwlock, pats, opts)
653
654
654
655
655 class dirnode(object):
656 class dirnode(object):
656 """
657 """
657 Represent a directory in user working copy with information required for
658 Represent a directory in user working copy with information required for
658 the purpose of tersing its status.
659 the purpose of tersing its status.
659
660
660 path is the path to the directory, without a trailing '/'
661 path is the path to the directory, without a trailing '/'
661
662
662 statuses is a set of statuses of all files in this directory (this includes
663 statuses is a set of statuses of all files in this directory (this includes
663 all the files in all the subdirectories too)
664 all the files in all the subdirectories too)
664
665
665 files is a list of files which are direct child of this directory
666 files is a list of files which are direct child of this directory
666
667
667 subdirs is a dictionary of sub-directory name as the key and it's own
668 subdirs is a dictionary of sub-directory name as the key and it's own
668 dirnode object as the value
669 dirnode object as the value
669 """
670 """
670
671
671 def __init__(self, dirpath):
672 def __init__(self, dirpath):
672 self.path = dirpath
673 self.path = dirpath
673 self.statuses = set()
674 self.statuses = set()
674 self.files = []
675 self.files = []
675 self.subdirs = {}
676 self.subdirs = {}
676
677
677 def _addfileindir(self, filename, status):
678 def _addfileindir(self, filename, status):
678 """Add a file in this directory as a direct child."""
679 """Add a file in this directory as a direct child."""
679 self.files.append((filename, status))
680 self.files.append((filename, status))
680
681
681 def addfile(self, filename, status):
682 def addfile(self, filename, status):
682 """
683 """
683 Add a file to this directory or to its direct parent directory.
684 Add a file to this directory or to its direct parent directory.
684
685
685 If the file is not direct child of this directory, we traverse to the
686 If the file is not direct child of this directory, we traverse to the
686 directory of which this file is a direct child of and add the file
687 directory of which this file is a direct child of and add the file
687 there.
688 there.
688 """
689 """
689
690
690 # the filename contains a path separator, it means it's not the direct
691 # the filename contains a path separator, it means it's not the direct
691 # child of this directory
692 # child of this directory
692 if b'/' in filename:
693 if b'/' in filename:
693 subdir, filep = filename.split(b'/', 1)
694 subdir, filep = filename.split(b'/', 1)
694
695
695 # does the dirnode object for subdir exists
696 # does the dirnode object for subdir exists
696 if subdir not in self.subdirs:
697 if subdir not in self.subdirs:
697 subdirpath = pathutil.join(self.path, subdir)
698 subdirpath = pathutil.join(self.path, subdir)
698 self.subdirs[subdir] = dirnode(subdirpath)
699 self.subdirs[subdir] = dirnode(subdirpath)
699
700
700 # try adding the file in subdir
701 # try adding the file in subdir
701 self.subdirs[subdir].addfile(filep, status)
702 self.subdirs[subdir].addfile(filep, status)
702
703
703 else:
704 else:
704 self._addfileindir(filename, status)
705 self._addfileindir(filename, status)
705
706
706 if status not in self.statuses:
707 if status not in self.statuses:
707 self.statuses.add(status)
708 self.statuses.add(status)
708
709
709 def iterfilepaths(self):
710 def iterfilepaths(self):
710 """Yield (status, path) for files directly under this directory."""
711 """Yield (status, path) for files directly under this directory."""
711 for f, st in self.files:
712 for f, st in self.files:
712 yield st, pathutil.join(self.path, f)
713 yield st, pathutil.join(self.path, f)
713
714
714 def tersewalk(self, terseargs):
715 def tersewalk(self, terseargs):
715 """
716 """
716 Yield (status, path) obtained by processing the status of this
717 Yield (status, path) obtained by processing the status of this
717 dirnode.
718 dirnode.
718
719
719 terseargs is the string of arguments passed by the user with `--terse`
720 terseargs is the string of arguments passed by the user with `--terse`
720 flag.
721 flag.
721
722
722 Following are the cases which can happen:
723 Following are the cases which can happen:
723
724
724 1) All the files in the directory (including all the files in its
725 1) All the files in the directory (including all the files in its
725 subdirectories) share the same status and the user has asked us to terse
726 subdirectories) share the same status and the user has asked us to terse
726 that status. -> yield (status, dirpath). dirpath will end in '/'.
727 that status. -> yield (status, dirpath). dirpath will end in '/'.
727
728
728 2) Otherwise, we do following:
729 2) Otherwise, we do following:
729
730
730 a) Yield (status, filepath) for all the files which are in this
731 a) Yield (status, filepath) for all the files which are in this
731 directory (only the ones in this directory, not the subdirs)
732 directory (only the ones in this directory, not the subdirs)
732
733
733 b) Recurse the function on all the subdirectories of this
734 b) Recurse the function on all the subdirectories of this
734 directory
735 directory
735 """
736 """
736
737
737 if len(self.statuses) == 1:
738 if len(self.statuses) == 1:
738 onlyst = self.statuses.pop()
739 onlyst = self.statuses.pop()
739
740
740 # Making sure we terse only when the status abbreviation is
741 # Making sure we terse only when the status abbreviation is
741 # passed as terse argument
742 # passed as terse argument
742 if onlyst in terseargs:
743 if onlyst in terseargs:
743 yield onlyst, self.path + b'/'
744 yield onlyst, self.path + b'/'
744 return
745 return
745
746
746 # add the files to status list
747 # add the files to status list
747 for st, fpath in self.iterfilepaths():
748 for st, fpath in self.iterfilepaths():
748 yield st, fpath
749 yield st, fpath
749
750
750 # recurse on the subdirs
751 # recurse on the subdirs
751 for dirobj in self.subdirs.values():
752 for dirobj in self.subdirs.values():
752 for st, fpath in dirobj.tersewalk(terseargs):
753 for st, fpath in dirobj.tersewalk(terseargs):
753 yield st, fpath
754 yield st, fpath
754
755
755
756
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    Builds a tree of dirnode objects; each node records enough information
    to decide whether the whole directory can be collapsed to one entry.
    """
    # the order matters here as that is used to produce final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # reject unknown status abbreviations up front
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.InputError(_(b"'%s' not recognized") % s)

    # dirnode for the root of the repository
    root = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            root.addfile(f, statuschar)
        tersedict[statuschar] = []

    # the root directory itself is never tersed, so emit its direct files
    for st, fpath in root.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each subdirectory, collecting (possibly tersed) entries
    for child in root.subdirs.values():
        for st, fpath in child.tersewalk(terseargs):
            tersedict[st].append(fpath)

    # assemble the per-status lists in the canonical order
    return scmutil.status(*[sorted(tersedict[st]) for st in allst])
811
812
812
813
813 def _commentlines(raw):
814 def _commentlines(raw):
814 '''Surround lineswith a comment char and a new line'''
815 '''Surround lineswith a comment char and a new line'''
815 lines = raw.splitlines()
816 lines = raw.splitlines()
816 commentedlines = [b'# %s' % line for line in lines]
817 commentedlines = [b'# %s' % line for line in lines]
817 return b'\n'.join(commentedlines) + b'\n'
818 return b'\n'.join(commentedlines) + b'\n'
818
819
819
820
@attr.s(frozen=True)
class morestatus(object):
    """Extra status information (unfinished operations, merge conflicts)
    rendered alongside the regular `hg status` output.

    reporoot: repository root path, used to print conflict paths relative
        to the current working directory.
    unfinishedop / unfinishedmsg: state description from statemod, or None.
    activemerge: whether a merge is in progress.
    unresolvedpaths: sorted unresolved conflict paths (when merging).
    """

    reporoot = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    # Paths already emitted via formatfile(). Must use a factory: the
    # previous `default=set()` created ONE set shared by every morestatus
    # instance, so paths formatted for one status invocation leaked into
    # later instances.
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Record that *path* was output, flagging it if unresolved."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the trailing morestatus summary (state + conflicts)."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
                )

    def _formatconflicts(self, fm):
        """Emit the unresolved-conflict listing for an active merge."""
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b'    %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved:  hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
897
898
898
899
def readmorestatus(repo):
    """Returns a morestatus object if the repo has unfinished state."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    merging = mergestate.active()

    # nothing unfinished and no active merge: nothing to report
    if not statetuple and not merging:
        return None

    op = msg = unresolved = None
    if statetuple:
        op, msg = statetuple
    if merging:
        unresolved = sorted(mergestate.unresolved())

    return morestatus(repo.root, op, msg, merging, unresolved)
915
916
916
917
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)

        if cmd in aliases:
            found = cmd
        elif strict:
            found = None
        else:
            # accept any alias that cmd is an unambiguous prefix of
            found = next(
                (alias for alias in aliases if alias.startswith(cmd)), None
            )

        if found is None:
            continue

        # debug commands only surface when nothing else matched
        if aliases[0].startswith(b"debug") or found.startswith(b"debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
954
955
955
956
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    matches, allcmds = findpossible(cmd, table, strict)

    # an exact match always wins, even when prefixes would be ambiguous
    if cmd in matches:
        return matches[cmd]

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))

    if matches:
        return list(matches.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
971
972
972
973
def changebranch(ui, repo, revs, label, opts):
    """ Change the branch name of given revs to label

    Rewrites each changeset in ``revs`` (a linear, topologically-headed,
    non-obsolete stack) as a new commit on branch ``label``, records
    obsolescence markers for the replaced nodes, and moves the working
    copy onto the rewritten head when it was parented there.

    Raises error.InputError for an empty set, non-linear revisions, a
    clashing branch name (without --force), obsolete changesets, or a
    selection that does not end at a topological head.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.InputError(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.InputError(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # branches of the parents of the stack's root; reusing one of
        # these is always allowed even without --force
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.InputError(
                _(b"a branch of the same name already exists")
            )

        if repo.revs(b'obsolete() and %ld', revs):
            raise error.InputError(
                _(b"cannot change branch of a obsolete changeset")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.InputError(
                _(b"cannot change branch in middle of a stack")
            )

        # old node -> (new node,) for every rewritten changeset
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # file contents are copied unchanged from the old changeset;
            # note: `ctx` is rebound each iteration and captured here, but
            # the closure is only called before the next iteration starts
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            # record the original node so the rewrite is traceable
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1079
1080
1080
1081
def findrepo(p):
    """Walk upward from *p* and return the first directory containing a
    ``.hg`` subdirectory, or None if the filesystem root is reached."""
    while not os.path.isdir(os.path.join(p, b".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # hit the root without finding a repository
            return None
        p = parent

    return p
1088
1089
1089
1090
def bailifchanged(repo, merge=True, hint=None):
    """enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    # a second dirstate parent means an uncommitted merge is in progress
    if merge and repo.dirstate.p2() != nullid:
        raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)

    status = repo.status()
    dirty = status.modified or status.added or status.removed or status.deleted
    if dirty:
        raise error.StateError(_(b'uncommitted changes'), hint=hint)

    # subrepos enforce the same precondition recursively
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1107
1108
1108
1109
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """

    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    # -m wins; fall back to reading the logfile when only -l was given
    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings while reading the file
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as err:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(err.strerror))
        )
1129
1130
1130
1131
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changectx: merges have more than one parent
        ismerge = len(ctxorbool.parents()) > 1

    return baseformname + (b".merge" if ismerge else b".normal")
1147
1148
1148
1149
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    # finishdesc/extramsg force the interactive editor (MQ relies on this)
    if edit or finishdesc or extramsg:

        def forcingeditor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forcingeditor

    if editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)

    return commiteditor
1179
1180
1180
1181
def _escapecommandtemplate(tmpl):
    """Escape raw string segments of 'tmpl' so backslashes survive expansion.

    Segments that scantemplate() reports as literal strings are run through
    escapestr(); template syntax segments are passed through unchanged.
    """
    pieces = [
        stringutil.escapestr(tmpl[begin:stop])
        if kind == b'string'
        else tmpl[begin:stop]
        for kind, begin, stop in templater.scantemplate(tmpl, raw=True)
    ]
    return b''.join(pieces)
1189
1190
1190
1191
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    # nothing to do for an empty/None template; return it untouched
    if not tmpl:
        return tmpl
    templ = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return templ.renderdefault(props)
1208
1209
1209
1210
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    # seed the mapping with the changeset, then layer caller-supplied props
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
1225
1226
1226
1227
def format_changeset_summary(ui, ctx, command=None, default_spec=None):
    """Format a changeset summary (one line)."""
    # Template spec precedence: per-command config override, generic config,
    # caller-supplied default, then the built-in one-line template.
    spec = None
    if command:
        spec = ui.config(
            b'command-templates', b'oneline-summary.%s' % command, None
        )
    spec = (
        spec
        or ui.config(b'command-templates', b'oneline-summary')
        or default_spec
        or (
            b'{separate(" ", '
            b'label("oneline-summary.changeset", "{rev}:{node|short}")'
            b', '
            b'join(filter(namespaces % "{ifeq(namespace, "branches", "", join(names % "{label("oneline-summary.{namespace}", name)}", " "))}"), " ")'
            b')} '
            b'"{label("oneline-summary.desc", desc|firstline)}"'
        )
    )
    # only the first rendered line is a "summary"
    rendered = rendertemplate(ctx, spec)
    return rendered.split(b'\n')[0]
1249
1250
1250
1251
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # Unconditional '%x' escapes and their template equivalents.
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # Escapes below are only valid when the caller provides the matching
    # keyword; otherwise they fall through to the "invalid format spec" abort.
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # with both known, zero-pad the sequence number to the width of total
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    # Walk the pattern as a template: only literal-string segments are
    # scanned for '%' escapes; existing template syntax is kept verbatim.
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more escapes in this segment; escape the tail so '\'
                # survives template expansion (Windows path separators)
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            # literal text before the '%'
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # trailing lone '%' with no selector character
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1320
1321
1321
1322
def makefilename(ctx, pat, **props):
    """Expand an old-style '%'-pattern (or template) filename for 'ctx'."""
    # empty/None pattern means "no filename"; hand it back untouched
    if not pat:
        return pat
    fntmpl = _buildfntemplate(pat, **props)
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, fntmpl, pycompat.byteskwargs(props))
1330
1331
1331
1332
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    # empty/None and the conventional '-' both mean the standard streams
    if pat and pat != b'-':
        return False
    return True
1335
1336
1336
1337
1337 class _unclosablefile(object):
1338 class _unclosablefile(object):
1338 def __init__(self, fp):
1339 def __init__(self, fp):
1339 self._fp = fp
1340 self._fp = fp
1340
1341
1341 def close(self):
1342 def close(self):
1342 pass
1343 pass
1343
1344
1344 def __iter__(self):
1345 def __iter__(self):
1345 return iter(self._fp)
1346 return iter(self._fp)
1346
1347
1347 def __getattr__(self, attr):
1348 def __getattr__(self, attr):
1348 return getattr(self._fp, attr)
1349 return getattr(self._fp, attr)
1349
1350
1350 def __enter__(self):
1351 def __enter__(self):
1351 return self
1352 return self
1352
1353
1353 def __exit__(self, exc_type, exc_value, exc_tb):
1354 def __exit__(self, exc_type, exc_value, exc_tb):
1354 pass
1355 pass
1355
1356
1356
1357
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the file named by expanding 'pat' against 'ctx'.

    A stdio pattern ('-' or empty) maps to ui.fout/ui.fin depending on
    whether 'mode' is writable, wrapped so close() is ignored.
    """
    writable = mode not in (b'r', b'rb')

    if isstdiofilename(pat):
        repo = ctx.repo()
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)
    fn = makefilename(ctx, pat, **props)
    return open(fn, mode)
1369
1370
1370
1371
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    'opts' must carry the b'changelog', b'manifest' and b'dir' flags
    (typically from a debug command's options). Exactly one storage source
    is selected; conflicting or insufficient option combinations raise
    InputError. With 'returnrevlog' set, the result is coerced to an actual
    revlog instance, falling back to opening 'file_' as a raw revlog on
    disk when no repository storage matched.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    # Validate option combinations up front; 'msg' stays None when valid.
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.InputError(msg)

    # Resolve the requested storage object from the repository, if any.
    r = None
    if repo:
        if cl:
            # unfiltered so hidden revisions remain visible to debug tools
            r = repo.unfiltered().changelog
        elif dir:
            if not scmutil.istreemanifest(repo):
                raise error.InputError(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            # tree manifest storage is keyed by directory path ending in '/'
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            # an empty filelog means the file was never committed
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            # unwrap storage objects that are backed by a revlog
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.InputError(
                _(b'%r does not appear to be a revlog') % r
            )

    if not r:
        if not returnrevlog:
            raise error.InputError(_(b'cannot give path to non-revlog'))

        # last resort: open 'file_' directly as an on-disk revlog
        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.InputError(_(b"revlog '%s' not found") % file_)
        # swap the data-file suffix (e.g. '.d') for the index '.i'
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r
1440
1441
1441
1442
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    This is similar to ``openstorage()`` except it always returns a revlog.

    In most cases, a caller cares about the main storage object - not the
    revlog backing it. Therefore, this function should only be used by code
    that needs to examine low-level revlog implementation details. e.g. debug
    commands.
    """
    # thin wrapper: force openstorage() to hand back a true revlog
    storage = openstorage(repo, cmd, file_, opts, returnrevlog=True)
    return storage
1453
1454
1454
1455
1455 def copy(ui, repo, pats, opts, rename=False):
1456 def copy(ui, repo, pats, opts, rename=False):
1456 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1457 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1457
1458
1458 # called with the repo lock held
1459 # called with the repo lock held
1459 #
1460 #
1460 # hgsep => pathname that uses "/" to separate directories
1461 # hgsep => pathname that uses "/" to separate directories
1461 # ossep => pathname that uses os.sep to separate directories
1462 # ossep => pathname that uses os.sep to separate directories
1462 cwd = repo.getcwd()
1463 cwd = repo.getcwd()
1463 targets = {}
1464 targets = {}
1464 forget = opts.get(b"forget")
1465 forget = opts.get(b"forget")
1465 after = opts.get(b"after")
1466 after = opts.get(b"after")
1466 dryrun = opts.get(b"dry_run")
1467 dryrun = opts.get(b"dry_run")
1467 rev = opts.get(b'at_rev')
1468 rev = opts.get(b'at_rev')
1468 if rev:
1469 if rev:
1469 if not forget and not after:
1470 if not forget and not after:
1470 # TODO: Remove this restriction and make it also create the copy
1471 # TODO: Remove this restriction and make it also create the copy
1471 # targets (and remove the rename source if rename==True).
1472 # targets (and remove the rename source if rename==True).
1472 raise error.InputError(_(b'--at-rev requires --after'))
1473 raise error.InputError(_(b'--at-rev requires --after'))
1473 ctx = scmutil.revsingle(repo, rev)
1474 ctx = scmutil.revsingle(repo, rev)
1474 if len(ctx.parents()) > 1:
1475 if len(ctx.parents()) > 1:
1475 raise error.InputError(
1476 raise error.InputError(
1476 _(b'cannot mark/unmark copy in merge commit')
1477 _(b'cannot mark/unmark copy in merge commit')
1477 )
1478 )
1478 else:
1479 else:
1479 ctx = repo[None]
1480 ctx = repo[None]
1480
1481
1481 pctx = ctx.p1()
1482 pctx = ctx.p1()
1482
1483
1483 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1484 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1484
1485
1485 if forget:
1486 if forget:
1486 if ctx.rev() is None:
1487 if ctx.rev() is None:
1487 new_ctx = ctx
1488 new_ctx = ctx
1488 else:
1489 else:
1489 if len(ctx.parents()) > 1:
1490 if len(ctx.parents()) > 1:
1490 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1491 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1491 # avoid cycle context -> subrepo -> cmdutil
1492 # avoid cycle context -> subrepo -> cmdutil
1492 from . import context
1493 from . import context
1493
1494
1494 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1495 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1495 new_ctx = context.overlayworkingctx(repo)
1496 new_ctx = context.overlayworkingctx(repo)
1496 new_ctx.setbase(ctx.p1())
1497 new_ctx.setbase(ctx.p1())
1497 mergemod.graft(repo, ctx, wctx=new_ctx)
1498 mergemod.graft(repo, ctx, wctx=new_ctx)
1498
1499
1499 match = scmutil.match(ctx, pats, opts)
1500 match = scmutil.match(ctx, pats, opts)
1500
1501
1501 current_copies = ctx.p1copies()
1502 current_copies = ctx.p1copies()
1502 current_copies.update(ctx.p2copies())
1503 current_copies.update(ctx.p2copies())
1503
1504
1504 uipathfn = scmutil.getuipathfn(repo)
1505 uipathfn = scmutil.getuipathfn(repo)
1505 for f in ctx.walk(match):
1506 for f in ctx.walk(match):
1506 if f in current_copies:
1507 if f in current_copies:
1507 new_ctx[f].markcopied(None)
1508 new_ctx[f].markcopied(None)
1508 elif match.exact(f):
1509 elif match.exact(f):
1509 ui.warn(
1510 ui.warn(
1510 _(
1511 _(
1511 b'%s: not unmarking as copy - file is not marked as copied\n'
1512 b'%s: not unmarking as copy - file is not marked as copied\n'
1512 )
1513 )
1513 % uipathfn(f)
1514 % uipathfn(f)
1514 )
1515 )
1515
1516
1516 if ctx.rev() is not None:
1517 if ctx.rev() is not None:
1517 with repo.lock():
1518 with repo.lock():
1518 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1519 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1519 new_node = mem_ctx.commit()
1520 new_node = mem_ctx.commit()
1520
1521
1521 if repo.dirstate.p1() == ctx.node():
1522 if repo.dirstate.p1() == ctx.node():
1522 with repo.dirstate.parentchange():
1523 with repo.dirstate.parentchange():
1523 scmutil.movedirstate(repo, repo[new_node])
1524 scmutil.movedirstate(repo, repo[new_node])
1524 replacements = {ctx.node(): [new_node]}
1525 replacements = {ctx.node(): [new_node]}
1525 scmutil.cleanupnodes(
1526 scmutil.cleanupnodes(
1526 repo, replacements, b'uncopy', fixphase=True
1527 repo, replacements, b'uncopy', fixphase=True
1527 )
1528 )
1528
1529
1529 return
1530 return
1530
1531
1531 pats = scmutil.expandpats(pats)
1532 pats = scmutil.expandpats(pats)
1532 if not pats:
1533 if not pats:
1533 raise error.InputError(_(b'no source or destination specified'))
1534 raise error.InputError(_(b'no source or destination specified'))
1534 if len(pats) == 1:
1535 if len(pats) == 1:
1535 raise error.InputError(_(b'no destination specified'))
1536 raise error.InputError(_(b'no destination specified'))
1536 dest = pats.pop()
1537 dest = pats.pop()
1537
1538
1538 def walkpat(pat):
1539 def walkpat(pat):
1539 srcs = []
1540 srcs = []
1540 # TODO: Inline and simplify the non-working-copy version of this code
1541 # TODO: Inline and simplify the non-working-copy version of this code
1541 # since it shares very little with the working-copy version of it.
1542 # since it shares very little with the working-copy version of it.
1542 ctx_to_walk = ctx if ctx.rev() is None else pctx
1543 ctx_to_walk = ctx if ctx.rev() is None else pctx
1543 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1544 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1544 for abs in ctx_to_walk.walk(m):
1545 for abs in ctx_to_walk.walk(m):
1545 rel = uipathfn(abs)
1546 rel = uipathfn(abs)
1546 exact = m.exact(abs)
1547 exact = m.exact(abs)
1547 if abs not in ctx:
1548 if abs not in ctx:
1548 if abs in pctx:
1549 if abs in pctx:
1549 if not after:
1550 if not after:
1550 if exact:
1551 if exact:
1551 ui.warn(
1552 ui.warn(
1552 _(
1553 _(
1553 b'%s: not copying - file has been marked '
1554 b'%s: not copying - file has been marked '
1554 b'for remove\n'
1555 b'for remove\n'
1555 )
1556 )
1556 % rel
1557 % rel
1557 )
1558 )
1558 continue
1559 continue
1559 else:
1560 else:
1560 if exact:
1561 if exact:
1561 ui.warn(
1562 ui.warn(
1562 _(b'%s: not copying - file is not managed\n') % rel
1563 _(b'%s: not copying - file is not managed\n') % rel
1563 )
1564 )
1564 continue
1565 continue
1565
1566
1566 # abs: hgsep
1567 # abs: hgsep
1567 # rel: ossep
1568 # rel: ossep
1568 srcs.append((abs, rel, exact))
1569 srcs.append((abs, rel, exact))
1569 return srcs
1570 return srcs
1570
1571
1571 if ctx.rev() is not None:
1572 if ctx.rev() is not None:
1572 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1573 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1573 absdest = pathutil.canonpath(repo.root, cwd, dest)
1574 absdest = pathutil.canonpath(repo.root, cwd, dest)
1574 if ctx.hasdir(absdest):
1575 if ctx.hasdir(absdest):
1575 raise error.InputError(
1576 raise error.InputError(
1576 _(b'%s: --at-rev does not support a directory as destination')
1577 _(b'%s: --at-rev does not support a directory as destination')
1577 % uipathfn(absdest)
1578 % uipathfn(absdest)
1578 )
1579 )
1579 if absdest not in ctx:
1580 if absdest not in ctx:
1580 raise error.InputError(
1581 raise error.InputError(
1581 _(b'%s: copy destination does not exist in %s')
1582 _(b'%s: copy destination does not exist in %s')
1582 % (uipathfn(absdest), ctx)
1583 % (uipathfn(absdest), ctx)
1583 )
1584 )
1584
1585
1585 # avoid cycle context -> subrepo -> cmdutil
1586 # avoid cycle context -> subrepo -> cmdutil
1586 from . import context
1587 from . import context
1587
1588
1588 copylist = []
1589 copylist = []
1589 for pat in pats:
1590 for pat in pats:
1590 srcs = walkpat(pat)
1591 srcs = walkpat(pat)
1591 if not srcs:
1592 if not srcs:
1592 continue
1593 continue
1593 for abs, rel, exact in srcs:
1594 for abs, rel, exact in srcs:
1594 copylist.append(abs)
1595 copylist.append(abs)
1595
1596
1596 if not copylist:
1597 if not copylist:
1597 raise error.InputError(_(b'no files to copy'))
1598 raise error.InputError(_(b'no files to copy'))
1598 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1599 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1599 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1600 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1600 # existing functions below.
1601 # existing functions below.
1601 if len(copylist) != 1:
1602 if len(copylist) != 1:
1602 raise error.InputError(_(b'--at-rev requires a single source'))
1603 raise error.InputError(_(b'--at-rev requires a single source'))
1603
1604
1604 new_ctx = context.overlayworkingctx(repo)
1605 new_ctx = context.overlayworkingctx(repo)
1605 new_ctx.setbase(ctx.p1())
1606 new_ctx.setbase(ctx.p1())
1606 mergemod.graft(repo, ctx, wctx=new_ctx)
1607 mergemod.graft(repo, ctx, wctx=new_ctx)
1607
1608
1608 new_ctx.markcopied(absdest, copylist[0])
1609 new_ctx.markcopied(absdest, copylist[0])
1609
1610
1610 with repo.lock():
1611 with repo.lock():
1611 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1612 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1612 new_node = mem_ctx.commit()
1613 new_node = mem_ctx.commit()
1613
1614
1614 if repo.dirstate.p1() == ctx.node():
1615 if repo.dirstate.p1() == ctx.node():
1615 with repo.dirstate.parentchange():
1616 with repo.dirstate.parentchange():
1616 scmutil.movedirstate(repo, repo[new_node])
1617 scmutil.movedirstate(repo, repo[new_node])
1617 replacements = {ctx.node(): [new_node]}
1618 replacements = {ctx.node(): [new_node]}
1618 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1619 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1619
1620
1620 return
1621 return
1621
1622
1622 # abssrc: hgsep
1623 # abssrc: hgsep
1623 # relsrc: ossep
1624 # relsrc: ossep
1624 # otarget: ossep
1625 # otarget: ossep
1625 def copyfile(abssrc, relsrc, otarget, exact):
1626 def copyfile(abssrc, relsrc, otarget, exact):
1626 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1627 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1627 if b'/' in abstarget:
1628 if b'/' in abstarget:
1628 # We cannot normalize abstarget itself, this would prevent
1629 # We cannot normalize abstarget itself, this would prevent
1629 # case only renames, like a => A.
1630 # case only renames, like a => A.
1630 abspath, absname = abstarget.rsplit(b'/', 1)
1631 abspath, absname = abstarget.rsplit(b'/', 1)
1631 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1632 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1632 reltarget = repo.pathto(abstarget, cwd)
1633 reltarget = repo.pathto(abstarget, cwd)
1633 target = repo.wjoin(abstarget)
1634 target = repo.wjoin(abstarget)
1634 src = repo.wjoin(abssrc)
1635 src = repo.wjoin(abssrc)
1635 state = repo.dirstate[abstarget]
1636 state = repo.dirstate[abstarget]
1636
1637
1637 scmutil.checkportable(ui, abstarget)
1638 scmutil.checkportable(ui, abstarget)
1638
1639
1639 # check for collisions
1640 # check for collisions
1640 prevsrc = targets.get(abstarget)
1641 prevsrc = targets.get(abstarget)
1641 if prevsrc is not None:
1642 if prevsrc is not None:
1642 ui.warn(
1643 ui.warn(
1643 _(b'%s: not overwriting - %s collides with %s\n')
1644 _(b'%s: not overwriting - %s collides with %s\n')
1644 % (
1645 % (
1645 reltarget,
1646 reltarget,
1646 repo.pathto(abssrc, cwd),
1647 repo.pathto(abssrc, cwd),
1647 repo.pathto(prevsrc, cwd),
1648 repo.pathto(prevsrc, cwd),
1648 )
1649 )
1649 )
1650 )
1650 return True # report a failure
1651 return True # report a failure
1651
1652
1652 # check for overwrites
1653 # check for overwrites
1653 exists = os.path.lexists(target)
1654 exists = os.path.lexists(target)
1654 samefile = False
1655 samefile = False
1655 if exists and abssrc != abstarget:
1656 if exists and abssrc != abstarget:
1656 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1657 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1657 abstarget
1658 abstarget
1658 ):
1659 ):
1659 if not rename:
1660 if not rename:
1660 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1661 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1661 return True # report a failure
1662 return True # report a failure
1662 exists = False
1663 exists = False
1663 samefile = True
1664 samefile = True
1664
1665
1665 if not after and exists or after and state in b'mn':
1666 if not after and exists or after and state in b'mn':
1666 if not opts[b'force']:
1667 if not opts[b'force']:
1667 if state in b'mn':
1668 if state in b'mn':
1668 msg = _(b'%s: not overwriting - file already committed\n')
1669 msg = _(b'%s: not overwriting - file already committed\n')
1669 if after:
1670 if after:
1670 flags = b'--after --force'
1671 flags = b'--after --force'
1671 else:
1672 else:
1672 flags = b'--force'
1673 flags = b'--force'
1673 if rename:
1674 if rename:
1674 hint = (
1675 hint = (
1675 _(
1676 _(
1676 b"('hg rename %s' to replace the file by "
1677 b"('hg rename %s' to replace the file by "
1677 b'recording a rename)\n'
1678 b'recording a rename)\n'
1678 )
1679 )
1679 % flags
1680 % flags
1680 )
1681 )
1681 else:
1682 else:
1682 hint = (
1683 hint = (
1683 _(
1684 _(
1684 b"('hg copy %s' to replace the file by "
1685 b"('hg copy %s' to replace the file by "
1685 b'recording a copy)\n'
1686 b'recording a copy)\n'
1686 )
1687 )
1687 % flags
1688 % flags
1688 )
1689 )
1689 else:
1690 else:
1690 msg = _(b'%s: not overwriting - file exists\n')
1691 msg = _(b'%s: not overwriting - file exists\n')
1691 if rename:
1692 if rename:
1692 hint = _(
1693 hint = _(
1693 b"('hg rename --after' to record the rename)\n"
1694 b"('hg rename --after' to record the rename)\n"
1694 )
1695 )
1695 else:
1696 else:
1696 hint = _(b"('hg copy --after' to record the copy)\n")
1697 hint = _(b"('hg copy --after' to record the copy)\n")
1697 ui.warn(msg % reltarget)
1698 ui.warn(msg % reltarget)
1698 ui.warn(hint)
1699 ui.warn(hint)
1699 return True # report a failure
1700 return True # report a failure
1700
1701
1701 if after:
1702 if after:
1702 if not exists:
1703 if not exists:
1703 if rename:
1704 if rename:
1704 ui.warn(
1705 ui.warn(
1705 _(b'%s: not recording move - %s does not exist\n')
1706 _(b'%s: not recording move - %s does not exist\n')
1706 % (relsrc, reltarget)
1707 % (relsrc, reltarget)
1707 )
1708 )
1708 else:
1709 else:
1709 ui.warn(
1710 ui.warn(
1710 _(b'%s: not recording copy - %s does not exist\n')
1711 _(b'%s: not recording copy - %s does not exist\n')
1711 % (relsrc, reltarget)
1712 % (relsrc, reltarget)
1712 )
1713 )
1713 return True # report a failure
1714 return True # report a failure
1714 elif not dryrun:
1715 elif not dryrun:
1715 try:
1716 try:
1716 if exists:
1717 if exists:
1717 os.unlink(target)
1718 os.unlink(target)
1718 targetdir = os.path.dirname(target) or b'.'
1719 targetdir = os.path.dirname(target) or b'.'
1719 if not os.path.isdir(targetdir):
1720 if not os.path.isdir(targetdir):
1720 os.makedirs(targetdir)
1721 os.makedirs(targetdir)
1721 if samefile:
1722 if samefile:
1722 tmp = target + b"~hgrename"
1723 tmp = target + b"~hgrename"
1723 os.rename(src, tmp)
1724 os.rename(src, tmp)
1724 os.rename(tmp, target)
1725 os.rename(tmp, target)
1725 else:
1726 else:
1726 # Preserve stat info on renames, not on copies; this matches
1727 # Preserve stat info on renames, not on copies; this matches
1727 # Linux CLI behavior.
1728 # Linux CLI behavior.
1728 util.copyfile(src, target, copystat=rename)
1729 util.copyfile(src, target, copystat=rename)
1729 srcexists = True
1730 srcexists = True
1730 except IOError as inst:
1731 except IOError as inst:
1731 if inst.errno == errno.ENOENT:
1732 if inst.errno == errno.ENOENT:
1732 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1733 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1733 srcexists = False
1734 srcexists = False
1734 else:
1735 else:
1735 ui.warn(
1736 ui.warn(
1736 _(b'%s: cannot copy - %s\n')
1737 _(b'%s: cannot copy - %s\n')
1737 % (relsrc, encoding.strtolocal(inst.strerror))
1738 % (relsrc, encoding.strtolocal(inst.strerror))
1738 )
1739 )
1739 return True # report a failure
1740 return True # report a failure
1740
1741
1741 if ui.verbose or not exact:
1742 if ui.verbose or not exact:
1742 if rename:
1743 if rename:
1743 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1744 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1744 else:
1745 else:
1745 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1746 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1746
1747
1747 targets[abstarget] = abssrc
1748 targets[abstarget] = abssrc
1748
1749
1749 # fix up dirstate
1750 # fix up dirstate
1750 scmutil.dirstatecopy(
1751 scmutil.dirstatecopy(
1751 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1752 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1752 )
1753 )
1753 if rename and not dryrun:
1754 if rename and not dryrun:
1754 if not after and srcexists and not samefile:
1755 if not after and srcexists and not samefile:
1755 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1756 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1756 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1757 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1757 ctx.forget([abssrc])
1758 ctx.forget([abssrc])
1758
1759
1759 # pat: ossep
1760 # pat: ossep
1760 # dest ossep
1761 # dest ossep
1761 # srcs: list of (hgsep, hgsep, ossep, bool)
1762 # srcs: list of (hgsep, hgsep, ossep, bool)
1762 # return: function that takes hgsep and returns ossep
1763 # return: function that takes hgsep and returns ossep
1763 def targetpathfn(pat, dest, srcs):
1764 def targetpathfn(pat, dest, srcs):
1764 if os.path.isdir(pat):
1765 if os.path.isdir(pat):
1765 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1766 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1766 abspfx = util.localpath(abspfx)
1767 abspfx = util.localpath(abspfx)
1767 if destdirexists:
1768 if destdirexists:
1768 striplen = len(os.path.split(abspfx)[0])
1769 striplen = len(os.path.split(abspfx)[0])
1769 else:
1770 else:
1770 striplen = len(abspfx)
1771 striplen = len(abspfx)
1771 if striplen:
1772 if striplen:
1772 striplen += len(pycompat.ossep)
1773 striplen += len(pycompat.ossep)
1773 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1774 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1774 elif destdirexists:
1775 elif destdirexists:
1775 res = lambda p: os.path.join(
1776 res = lambda p: os.path.join(
1776 dest, os.path.basename(util.localpath(p))
1777 dest, os.path.basename(util.localpath(p))
1777 )
1778 )
1778 else:
1779 else:
1779 res = lambda p: dest
1780 res = lambda p: dest
1780 return res
1781 return res
1781
1782
1782 # pat: ossep
1783 # pat: ossep
1783 # dest ossep
1784 # dest ossep
1784 # srcs: list of (hgsep, hgsep, ossep, bool)
1785 # srcs: list of (hgsep, hgsep, ossep, bool)
1785 # return: function that takes hgsep and returns ossep
1786 # return: function that takes hgsep and returns ossep
1786 def targetpathafterfn(pat, dest, srcs):
1787 def targetpathafterfn(pat, dest, srcs):
1787 if matchmod.patkind(pat):
1788 if matchmod.patkind(pat):
1788 # a mercurial pattern
1789 # a mercurial pattern
1789 res = lambda p: os.path.join(
1790 res = lambda p: os.path.join(
1790 dest, os.path.basename(util.localpath(p))
1791 dest, os.path.basename(util.localpath(p))
1791 )
1792 )
1792 else:
1793 else:
1793 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1794 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1794 if len(abspfx) < len(srcs[0][0]):
1795 if len(abspfx) < len(srcs[0][0]):
1795 # A directory. Either the target path contains the last
1796 # A directory. Either the target path contains the last
1796 # component of the source path or it does not.
1797 # component of the source path or it does not.
1797 def evalpath(striplen):
1798 def evalpath(striplen):
1798 score = 0
1799 score = 0
1799 for s in srcs:
1800 for s in srcs:
1800 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1801 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1801 if os.path.lexists(t):
1802 if os.path.lexists(t):
1802 score += 1
1803 score += 1
1803 return score
1804 return score
1804
1805
1805 abspfx = util.localpath(abspfx)
1806 abspfx = util.localpath(abspfx)
1806 striplen = len(abspfx)
1807 striplen = len(abspfx)
1807 if striplen:
1808 if striplen:
1808 striplen += len(pycompat.ossep)
1809 striplen += len(pycompat.ossep)
1809 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1810 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1810 score = evalpath(striplen)
1811 score = evalpath(striplen)
1811 striplen1 = len(os.path.split(abspfx)[0])
1812 striplen1 = len(os.path.split(abspfx)[0])
1812 if striplen1:
1813 if striplen1:
1813 striplen1 += len(pycompat.ossep)
1814 striplen1 += len(pycompat.ossep)
1814 if evalpath(striplen1) > score:
1815 if evalpath(striplen1) > score:
1815 striplen = striplen1
1816 striplen = striplen1
1816 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1817 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1817 else:
1818 else:
1818 # a file
1819 # a file
1819 if destdirexists:
1820 if destdirexists:
1820 res = lambda p: os.path.join(
1821 res = lambda p: os.path.join(
1821 dest, os.path.basename(util.localpath(p))
1822 dest, os.path.basename(util.localpath(p))
1822 )
1823 )
1823 else:
1824 else:
1824 res = lambda p: dest
1825 res = lambda p: dest
1825 return res
1826 return res
1826
1827
1827 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1828 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1828 if not destdirexists:
1829 if not destdirexists:
1829 if len(pats) > 1 or matchmod.patkind(pats[0]):
1830 if len(pats) > 1 or matchmod.patkind(pats[0]):
1830 raise error.InputError(
1831 raise error.InputError(
1831 _(
1832 _(
1832 b'with multiple sources, destination must be an '
1833 b'with multiple sources, destination must be an '
1833 b'existing directory'
1834 b'existing directory'
1834 )
1835 )
1835 )
1836 )
1836 if util.endswithsep(dest):
1837 if util.endswithsep(dest):
1837 raise error.InputError(
1838 raise error.InputError(
1838 _(b'destination %s is not a directory') % dest
1839 _(b'destination %s is not a directory') % dest
1839 )
1840 )
1840
1841
1841 tfn = targetpathfn
1842 tfn = targetpathfn
1842 if after:
1843 if after:
1843 tfn = targetpathafterfn
1844 tfn = targetpathafterfn
1844 copylist = []
1845 copylist = []
1845 for pat in pats:
1846 for pat in pats:
1846 srcs = walkpat(pat)
1847 srcs = walkpat(pat)
1847 if not srcs:
1848 if not srcs:
1848 continue
1849 continue
1849 copylist.append((tfn(pat, dest, srcs), srcs))
1850 copylist.append((tfn(pat, dest, srcs), srcs))
1850 if not copylist:
1851 if not copylist:
1851 raise error.InputError(_(b'no files to copy'))
1852 raise error.InputError(_(b'no files to copy'))
1852
1853
1853 errors = 0
1854 errors = 0
1854 for targetpath, srcs in copylist:
1855 for targetpath, srcs in copylist:
1855 for abssrc, relsrc, exact in srcs:
1856 for abssrc, relsrc, exact in srcs:
1856 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1857 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1857 errors += 1
1858 errors += 1
1858
1859
1859 return errors != 0
1860 return errors != 0
1860
1861
1861
1862
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1882
1883
1883
1884
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple where ``msg`` is a status
    message, ``node`` the created changeset (or None) and ``rejects`` a
    boolean indicating whether a partial application left reject files.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        # prefer lookup by revision, even for the null changeset
        parents.append(repo[nullrev])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.InputError(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullrev]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullrev]
        except error.RepoError:
            p1, p2 = parents
        if p2.rev() == nullrev:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # applying to the working directory: update to p1 and record p2
        # as the second parent if needed.
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
        if partial:
            rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: build the changeset in memory without touching the
        # working directory.
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2085
2086
2086
2087
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
2094
2095
2095
2096
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Write one changeset as an "HG changeset patch" to formatter ``fm``.

    Emits the patch header (user, date, branch, node, parents), any
    extension-provided extra headers, the description, then the diff
    against the first parent (or second, when ``switch_parent`` is set).
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # plain output: stream labelled diff chunks directly
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2139
2140
2140
2141
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        # '<...>' names (e.g. '<unnamed>') are pseudo destinations, not files
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
2149
2150
2150
2151
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemap = util.sortdict()  # filename: [(seqno, rev), ...]

    # First pass: group revisions by the file name the template yields,
    # preserving template expansion order.
    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        dest = makefilename(
            ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    # Second pass: write each file once, emitting all of its revisions.
    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                ctx = repo[rev]
                _exportsingle(
                    repo, ctx, fm, match, switch_parent, seqno, diffopts
                )
2175
2176
2176
2177
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch file contents touched by ``revs`` (filtered by ``match``).

    Collects every file modified in the given revisions and asks scmutil
    to prefetch them, so subsequent per-revision diffs don't fetch lazily.
    """
    allfiles = set()
    for rev in revs:
        for file in repo[rev].files():
            if not match or match(file):
                allfiles.add(file)
    match = scmutil.matchfiles(repo, allfiles)
    revmatches = [(rev, match) for rev in revs]
    scmutil.prefetchfiles(repo, revmatches)
2186
2187
2187
2188
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    """
    _prefetchchangedfiles(repo, revs, match)

    if not fntemplate:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
    else:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
2229
2230
2230
2231
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    _prefetchchangedfiles(repo, revs, match)

    # Use the stream's name, when it has one, for progress/error reporting.
    destname = getattr(fp, 'name', b'<unnamed>')
    fm = formatter.formatter(repo.ui, fp, b'export', {})
    with fm:
        _exportfile(repo, revs, fm, destname, switch_parent, opts, match)
2238
2239
2239
2240
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(
        successors,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, successors), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parentnodes), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # The date is rendered separately above, so drop it from the metadata
    # dict before formatting the remainder.
    metadata = marker.metadata().copy()
    metadata.pop(b'date', None)
    displaymeta = pycompat.rapply(pycompat.maybebytestr, metadata)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(displaymeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2270
2271
2271
2272
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    matching = repo.revs(b'date(%s)', date)
    try:
        # max() raises ValueError when the revset is empty.
        rev = matching.max()
    except ValueError:
        raise error.InputError(_(b"revision matching date not found"))

    ui.status(
        _(b"found revision %d from %s\n")
        % (rev, dateutil.datestr(repo[rev].date()))
    )
    return b'%d' % rev
2285
2286
2286
2287
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule files matched by ``match`` for addition to the dirstate.

    Recurses into subrepositories found in the working context; with
    ``explicitonly`` only exactly-named files are added.  Returns the list
    of files that could not be added.
    """
    # files that could not be added (filled in via the bad-file callback
    # and by wctx.add() rejections below)
    bad = []

    # record the bad path, then forward to the matcher's own handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit new names for case collisions / non-portable names
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # Exactly-named files are always added; pattern matches only when
        # not in explicit-only mode, untracked, and present on disk.
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    # Recurse into each subrepository with a narrowed matcher/prefix.
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                # --subrepos: the subrepo add is not explicit-only either
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections for files the user named explicitly
        bad.extend(f for f in rejected if f in match.files())
    return bad
2345
2346
2346
2347
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` and its historical subrepositories in the hgweb
    configuration mapping ``webconf`` under ``serverpath``."""
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    # Visit every revision that touched .hgsub so subrepos referenced at any
    # point in history are published too.
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2355
2356
2356
2357
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Untrack the files selected by ``match`` (the ``hg forget`` core).

    Recurses into subrepositories.  With ``explicitonly``, only
    exactly-named files are considered; ``dryrun`` reports without changing
    the dirstate; ``interactive`` prompts per file.  Returns a
    ``(bad, forgot)`` pair: files that could not be forgotten, and files
    that were (or would be) forgotten.
    """
    if dryrun and interactive:
        raise error.InputError(
            _(b"cannot specify both --dry-run and --interactive")
        )
    bad = []
    # record the bad path, then forward to the matcher's own handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    # Candidate set: every tracked file the matcher selects.
    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    # Recurse into each subrepository, prefixing returned paths.
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # Warn about explicitly named files that are not tracked.
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # Iterate over a copy since 'forget' is mutated by the answers.
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                # Display help, then re-ask until a real answer is given.
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                # Drop this file and everything after it.
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        # only report rejections for files the user named explicitly
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2456
2457
2457
2458
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """Print the files in ``ctx`` selected by matcher ``m`` through the
    formatter ``fm``, recursing into subrepositories as requested.

    Returns 0 when at least one file was listed, 1 otherwise."""
    exitcode = 1

    wantsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not wantsfctx:
        # Fast path. The speed-up comes from skipping the formatter, and
        # batching calls to ui.write.
        pending = []
        for f in ctx.matches(m):
            pending.append(fmt % uipathfn(f))
            if len(pending) > 100:
                ui.write(b''.join(pending))
                del pending[:]
            exitcode = 0
        if pending:
            ui.write(b''.join(pending))
    else:
        for f in ctx.matches(m):
            fm.startitem()
            fm.context(ctx=ctx)
            if wantsfctx:
                fctx = ctx[f]
                fm.write(
                    b'size flags', b'% 10d % 1s ', fctx.size(), fctx.flags()
                )
            fm.data(path=f)
            fm.plain(fmt % uipathfn(f))
            exitcode = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                substatus = sub.printfiles(
                    ui, submatch, subuipathfn, fm, fmt, recurse
                )
                if substatus == 0:
                    exitcode = 0
            except error.LookupError:
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return exitcode
2504
2505
2505
2506
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Implement the file-removal logic behind ``hg remove``.

    ``after`` only records already-deleted files; ``force`` removes even
    modified/added files.  Warnings are accumulated in ``warnings`` and only
    emitted here when the caller did not pass its own list (i.e. at the top
    of the subrepo recursion).  Returns 0 on success, 1 if any file could
    not be removed.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    if warnings is None:
        # top-level call: we own the warning list and print it at the end
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            # is f inside one of this repo's subrepositories?
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # Decide which tracked files actually get removed.
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record files already deleted from disk
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
            ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2655
2656
2656
2657
2657 def _catfmtneedsdata(fm):
2658 def _catfmtneedsdata(fm):
2658 return not fm.datahint() or b'data' in fm.datahint()
2659 return not fm.datahint() or b'data' in fm.datahint()
2659
2660
2660
2661
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""

    # data() can be expensive to fetch (e.g. lfs), so only do so when the
    # formatter actually requested the contents.
    if _catfmtneedsdata(fm):
        filedata = ctx[path].data()
        if decode:
            filedata = ctx.repo().wwritedata(path, filedata)
    else:
        filedata = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', filedata)
    fm.data(path=path)
2678
2679
2679
2680
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files in ``ctx`` selected by ``matcher``,
    either to per-file destinations named via ``fntemplate`` or to the
    formatter ``basefm``.  Recurses into subrepositories.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # Emit one file, optionally redirecting output to a templated
        # filename (created on demand).
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            # file not in this manifest; fall through to the slow path
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2740
2741
2741
2742
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get(b'date')
    if rawdate:
        opts[b'date'] = dateutil.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    guard = None
    if opts.get(b'addremove'):
        guard = dirstateguard.dirstateguard(repo, b'commit')
    with guard or util.nullcontextmanager():
        if guard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

    return commitfunc(ui, repo, message, matcher, opts)
2765
2766
2766
2767
def samefile(f, ctx1, ctx2):
    """Return True when file ``f`` is the same in both contexts.

    "Same" means either identical content and flags in both, or absent
    from both.  Present in only one of the two contexts means different.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # absent from both -> same; present only in ctx2 -> different
        return not in2
    if not in2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2777
2778
2778
2779
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in matched working-copy changes.

    Creates a replacement commit combining ``old`` with the working-copy
    modifications selected by ``pats``/``opts``, reroutes the working
    directory parent onto it, fixes up the dirstate for the amended files
    and obsoletes (or strips) the old node.

    Returns the new node id, or ``old.node()`` when the amend would change
    nothing at all.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolvecommitoptions(ui, opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # Fix: the previous code tested the bound method ``old.p2``
            # (always truthy) instead of calling it, so copies were also
            # computed against a nonexistent second parent. Only consult
            # p2 when one actually exists (``bool(ctx)`` is False for the
            # null revision).
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)
        ms.reset()

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in the
        # amend to "normal" in the dirstate. We need to use "normallookup" since
        # the files may have changed since the command started; using "normal"
        # would mark them as clean but with uncommitted contents.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normallookup(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        return newid
3008
3009
3009
3010
def commiteditor(repo, ctx, subs, editform=b''):
    """Return ctx's description, launching an editor only when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(
        repo,
        ctx,
        subs,
        editform=editform,
        unchangedmessagedetection=True,
    )
3016
3017
3017
3018
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Open the user's editor to obtain a commit message for ``ctx``.

    The editor buffer is seeded either from a ``[committemplate]`` template
    matching the most specific ``editform`` component, or from the default
    commit text.  ``finishdesc`` (if given) post-processes the edited text.
    Raises ``error.InputError`` when the resulting message is empty, or when
    ``unchangedmessagedetection`` is set and the user saved the seeded
    template text unmodified.  Returns the cleaned-up message bytes.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # Walk editform components from most specific (e.g. 'changeset.commit.amend')
    # to least specific, using the first [committemplate] entry that exists.
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        # no template configured at any specificity level
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop the 'HG:' helper lines that seeded the buffer
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.InputError(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.InputError(_(b"commit message unchanged"))

    return text
3081
3082
3082
3083
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the [committemplate] template ``ref`` for ``ctx`` and return
    the resulting bytes."""
    ui = repo.ui
    spec = formatter.reference_templatespec(ref)
    templ = logcmdutil.changesettemplater(ui, repo, spec)
    overrides = [
        (k, templater.unquotestring(v))
        for k, v in repo.ui.configitems(b'committemplate')
    ]
    templ.t.cache.update(overrides)

    # ensure that extramsg is a string
    extramsg = extramsg or b''

    ui.pushbuffer()
    templ.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3098
3099
3099
3100
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` with 'HG: ', dropping blanks."""
    prefixed = []
    for line in msg.split(b"\n"):
        if line:
            prefixed.append(b"HG: " + line)
    return b"\n".join(prefixed)
3102
3103
3103
3104
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default text seeded into the commit-message editor."""
    modified = ctx.modified()
    added = ctx.added()
    removed = ctx.removed()

    lines = []
    add = lines.append
    if ctx.description():
        add(ctx.description())
    add(b"")
    add(b"")  # empty line between message and comments
    add(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    add(hgprefix(extramsg))
    add(b"HG: --")
    add(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_(b"branch merge")))
    if ctx.branch():
        add(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    for s in subs:
        add(hgprefix(_(b"subrepo %s") % s))
    for f in added:
        add(hgprefix(_(b"added %s") % f))
    for f in modified:
        add(hgprefix(_(b"changed %s") % f))
    for f in removed:
        add(hgprefix(_(b"removed %s") % f))
    if not (added or modified or removed):
        add(hgprefix(_(b"no files changed")))
    add(b"")

    return b"\n".join(lines)
3137
3138
3138
3139
def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
    """Emit the post-commit user messages for the new changeset ``node``.

    Warns when ``tip`` shows the repository was unchanged (the commit already
    existed), prints "created new head" when the commit introduced an extra
    head on ``branch`` (``bheads`` is the branch's head list before the
    commit), notes reopened closed branch heads, and echoes the committed
    changeset id in verbose/debug mode.  Produces output only; returns None.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if tip is not None and repo.changelog.tip() == tip:
        # avoid reporting something like "committed new head" when
        # recommitting old changesets, and issue a helpful warning
        # for most instances
        repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
    elif (
        not opts.get(b'amend')
        and bheads
        and node not in bheads
        and not any(
            p.node() in bheads and p.branch() == branch for p in parents
        )
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get(b'close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % r.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3201
3202
3202
3203
def postcommitstatus(repo, pats, opts):
    """Return the working-copy status for files matched by pats/opts."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
3205
3206
3206
3207
3207 def revert(ui, repo, ctx, *pats, **opts):
3208 def revert(ui, repo, ctx, *pats, **opts):
3208 opts = pycompat.byteskwargs(opts)
3209 opts = pycompat.byteskwargs(opts)
3209 parent, p2 = repo.dirstate.parents()
3210 parent, p2 = repo.dirstate.parents()
3210 node = ctx.node()
3211 node = ctx.node()
3211
3212
3212 mf = ctx.manifest()
3213 mf = ctx.manifest()
3213 if node == p2:
3214 if node == p2:
3214 parent = p2
3215 parent = p2
3215
3216
3216 # need all matching names in dirstate and manifest of target rev,
3217 # need all matching names in dirstate and manifest of target rev,
3217 # so have to walk both. do not print errors if files exist in one
3218 # so have to walk both. do not print errors if files exist in one
3218 # but not other. in both cases, filesets should be evaluated against
3219 # but not other. in both cases, filesets should be evaluated against
3219 # workingctx to get consistent result (issue4497). this means 'set:**'
3220 # workingctx to get consistent result (issue4497). this means 'set:**'
3220 # cannot be used to select missing files from target rev.
3221 # cannot be used to select missing files from target rev.
3221
3222
3222 # `names` is a mapping for all elements in working copy and target revision
3223 # `names` is a mapping for all elements in working copy and target revision
3223 # The mapping is in the form:
3224 # The mapping is in the form:
3224 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3225 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3225 names = {}
3226 names = {}
3226 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3227 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3227
3228
3228 with repo.wlock():
3229 with repo.wlock():
3229 ## filling of the `names` mapping
3230 ## filling of the `names` mapping
3230 # walk dirstate to fill `names`
3231 # walk dirstate to fill `names`
3231
3232
3232 interactive = opts.get(b'interactive', False)
3233 interactive = opts.get(b'interactive', False)
3233 wctx = repo[None]
3234 wctx = repo[None]
3234 m = scmutil.match(wctx, pats, opts)
3235 m = scmutil.match(wctx, pats, opts)
3235
3236
3236 # we'll need this later
3237 # we'll need this later
3237 targetsubs = sorted(s for s in wctx.substate if m(s))
3238 targetsubs = sorted(s for s in wctx.substate if m(s))
3238
3239
3239 if not m.always():
3240 if not m.always():
3240 matcher = matchmod.badmatch(m, lambda x, y: False)
3241 matcher = matchmod.badmatch(m, lambda x, y: False)
3241 for abs in wctx.walk(matcher):
3242 for abs in wctx.walk(matcher):
3242 names[abs] = m.exact(abs)
3243 names[abs] = m.exact(abs)
3243
3244
3244 # walk target manifest to fill `names`
3245 # walk target manifest to fill `names`
3245
3246
3246 def badfn(path, msg):
3247 def badfn(path, msg):
3247 if path in names:
3248 if path in names:
3248 return
3249 return
3249 if path in ctx.substate:
3250 if path in ctx.substate:
3250 return
3251 return
3251 path_ = path + b'/'
3252 path_ = path + b'/'
3252 for f in names:
3253 for f in names:
3253 if f.startswith(path_):
3254 if f.startswith(path_):
3254 return
3255 return
3255 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3256 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3256
3257
3257 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3258 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3258 if abs not in names:
3259 if abs not in names:
3259 names[abs] = m.exact(abs)
3260 names[abs] = m.exact(abs)
3260
3261
3261 # Find status of all file in `names`.
3262 # Find status of all file in `names`.
3262 m = scmutil.matchfiles(repo, names)
3263 m = scmutil.matchfiles(repo, names)
3263
3264
3264 changes = repo.status(
3265 changes = repo.status(
3265 node1=node, match=m, unknown=True, ignored=True, clean=True
3266 node1=node, match=m, unknown=True, ignored=True, clean=True
3266 )
3267 )
3267 else:
3268 else:
3268 changes = repo.status(node1=node, match=m)
3269 changes = repo.status(node1=node, match=m)
3269 for kind in changes:
3270 for kind in changes:
3270 for abs in kind:
3271 for abs in kind:
3271 names[abs] = m.exact(abs)
3272 names[abs] = m.exact(abs)
3272
3273
3273 m = scmutil.matchfiles(repo, names)
3274 m = scmutil.matchfiles(repo, names)
3274
3275
3275 modified = set(changes.modified)
3276 modified = set(changes.modified)
3276 added = set(changes.added)
3277 added = set(changes.added)
3277 removed = set(changes.removed)
3278 removed = set(changes.removed)
3278 _deleted = set(changes.deleted)
3279 _deleted = set(changes.deleted)
3279 unknown = set(changes.unknown)
3280 unknown = set(changes.unknown)
3280 unknown.update(changes.ignored)
3281 unknown.update(changes.ignored)
3281 clean = set(changes.clean)
3282 clean = set(changes.clean)
3282 modadded = set()
3283 modadded = set()
3283
3284
3284 # We need to account for the state of the file in the dirstate,
3285 # We need to account for the state of the file in the dirstate,
3285 # even when we revert against something else than parent. This will
3286 # even when we revert against something else than parent. This will
3286 # slightly alter the behavior of revert (doing back up or not, delete
3287 # slightly alter the behavior of revert (doing back up or not, delete
3287 # or just forget etc).
3288 # or just forget etc).
3288 if parent == node:
3289 if parent == node:
3289 dsmodified = modified
3290 dsmodified = modified
3290 dsadded = added
3291 dsadded = added
3291 dsremoved = removed
3292 dsremoved = removed
3292 # store all local modifications, useful later for rename detection
3293 # store all local modifications, useful later for rename detection
3293 localchanges = dsmodified | dsadded
3294 localchanges = dsmodified | dsadded
3294 modified, added, removed = set(), set(), set()
3295 modified, added, removed = set(), set(), set()
3295 else:
3296 else:
3296 changes = repo.status(node1=parent, match=m)
3297 changes = repo.status(node1=parent, match=m)
3297 dsmodified = set(changes.modified)
3298 dsmodified = set(changes.modified)
3298 dsadded = set(changes.added)
3299 dsadded = set(changes.added)
3299 dsremoved = set(changes.removed)
3300 dsremoved = set(changes.removed)
3300 # store all local modifications, useful later for rename detection
3301 # store all local modifications, useful later for rename detection
3301 localchanges = dsmodified | dsadded
3302 localchanges = dsmodified | dsadded
3302
3303
3303 # only take into account for removes between wc and target
3304 # only take into account for removes between wc and target
3304 clean |= dsremoved - removed
3305 clean |= dsremoved - removed
3305 dsremoved &= removed
3306 dsremoved &= removed
3306 # distinct between dirstate remove and other
3307 # distinct between dirstate remove and other
3307 removed -= dsremoved
3308 removed -= dsremoved
3308
3309
3309 modadded = added & dsmodified
3310 modadded = added & dsmodified
3310 added -= modadded
3311 added -= modadded
3311
3312
3312 # tell newly modified apart.
3313 # tell newly modified apart.
3313 dsmodified &= modified
3314 dsmodified &= modified
3314 dsmodified |= modified & dsadded # dirstate added may need backup
3315 dsmodified |= modified & dsadded # dirstate added may need backup
3315 modified -= dsmodified
3316 modified -= dsmodified
3316
3317
3317 # We need to wait for some post-processing to update this set
3318 # We need to wait for some post-processing to update this set
3318 # before making the distinction. The dirstate will be used for
3319 # before making the distinction. The dirstate will be used for
3319 # that purpose.
3320 # that purpose.
3320 dsadded = added
3321 dsadded = added
3321
3322
3322 # in case of merge, files that are actually added can be reported as
3323 # in case of merge, files that are actually added can be reported as
3323 # modified, we need to post process the result
3324 # modified, we need to post process the result
3324 if p2 != nullid:
3325 if p2 != nullid:
3325 mergeadd = set(dsmodified)
3326 mergeadd = set(dsmodified)
3326 for path in dsmodified:
3327 for path in dsmodified:
3327 if path in mf:
3328 if path in mf:
3328 mergeadd.remove(path)
3329 mergeadd.remove(path)
3329 dsadded |= mergeadd
3330 dsadded |= mergeadd
3330 dsmodified -= mergeadd
3331 dsmodified -= mergeadd
3331
3332
3332 # if f is a rename, update `names` to also revert the source
3333 # if f is a rename, update `names` to also revert the source
3333 for f in localchanges:
3334 for f in localchanges:
3334 src = repo.dirstate.copied(f)
3335 src = repo.dirstate.copied(f)
3335 # XXX should we check for rename down to target node?
3336 # XXX should we check for rename down to target node?
3336 if src and src not in names and repo.dirstate[src] == b'r':
3337 if src and src not in names and repo.dirstate[src] == b'r':
3337 dsremoved.add(src)
3338 dsremoved.add(src)
3338 names[src] = True
3339 names[src] = True
3339
3340
3340 # determine the exact nature of the deleted changesets
3341 # determine the exact nature of the deleted changesets
3341 deladded = set(_deleted)
3342 deladded = set(_deleted)
3342 for path in _deleted:
3343 for path in _deleted:
3343 if path in mf:
3344 if path in mf:
3344 deladded.remove(path)
3345 deladded.remove(path)
3345 deleted = _deleted - deladded
3346 deleted = _deleted - deladded
3346
3347
3347 # distinguish between file to forget and the other
3348 # distinguish between file to forget and the other
3348 added = set()
3349 added = set()
3349 for abs in dsadded:
3350 for abs in dsadded:
3350 if repo.dirstate[abs] != b'a':
3351 if repo.dirstate[abs] != b'a':
3351 added.add(abs)
3352 added.add(abs)
3352 dsadded -= added
3353 dsadded -= added
3353
3354
3354 for abs in deladded:
3355 for abs in deladded:
3355 if repo.dirstate[abs] == b'a':
3356 if repo.dirstate[abs] == b'a':
3356 dsadded.add(abs)
3357 dsadded.add(abs)
3357 deladded -= dsadded
3358 deladded -= dsadded
3358
3359
3359 # For files marked as removed, we check if an unknown file is present at
3360 # For files marked as removed, we check if an unknown file is present at
3360 # the same path. If a such file exists it may need to be backed up.
3361 # the same path. If a such file exists it may need to be backed up.
3361 # Making the distinction at this stage helps have simpler backup
3362 # Making the distinction at this stage helps have simpler backup
3362 # logic.
3363 # logic.
3363 removunk = set()
3364 removunk = set()
3364 for abs in removed:
3365 for abs in removed:
3365 target = repo.wjoin(abs)
3366 target = repo.wjoin(abs)
3366 if os.path.lexists(target):
3367 if os.path.lexists(target):
3367 removunk.add(abs)
3368 removunk.add(abs)
3368 removed -= removunk
3369 removed -= removunk
3369
3370
3370 dsremovunk = set()
3371 dsremovunk = set()
3371 for abs in dsremoved:
3372 for abs in dsremoved:
3372 target = repo.wjoin(abs)
3373 target = repo.wjoin(abs)
3373 if os.path.lexists(target):
3374 if os.path.lexists(target):
3374 dsremovunk.add(abs)
3375 dsremovunk.add(abs)
3375 dsremoved -= dsremovunk
3376 dsremoved -= dsremovunk
3376
3377
3377 # action to be actually performed by revert
3378 # action to be actually performed by revert
3378 # (<list of file>, message>) tuple
3379 # (<list of file>, message>) tuple
3379 actions = {
3380 actions = {
3380 b'revert': ([], _(b'reverting %s\n')),
3381 b'revert': ([], _(b'reverting %s\n')),
3381 b'add': ([], _(b'adding %s\n')),
3382 b'add': ([], _(b'adding %s\n')),
3382 b'remove': ([], _(b'removing %s\n')),
3383 b'remove': ([], _(b'removing %s\n')),
3383 b'drop': ([], _(b'removing %s\n')),
3384 b'drop': ([], _(b'removing %s\n')),
3384 b'forget': ([], _(b'forgetting %s\n')),
3385 b'forget': ([], _(b'forgetting %s\n')),
3385 b'undelete': ([], _(b'undeleting %s\n')),
3386 b'undelete': ([], _(b'undeleting %s\n')),
3386 b'noop': (None, _(b'no changes needed to %s\n')),
3387 b'noop': (None, _(b'no changes needed to %s\n')),
3387 b'unknown': (None, _(b'file not managed: %s\n')),
3388 b'unknown': (None, _(b'file not managed: %s\n')),
3388 }
3389 }
3389
3390
3390 # "constant" that convey the backup strategy.
3391 # "constant" that convey the backup strategy.
3391 # All set to `discard` if `no-backup` is set do avoid checking
3392 # All set to `discard` if `no-backup` is set do avoid checking
3392 # no_backup lower in the code.
3393 # no_backup lower in the code.
3393 # These values are ordered for comparison purposes
3394 # These values are ordered for comparison purposes
3394 backupinteractive = 3 # do backup if interactively modified
3395 backupinteractive = 3 # do backup if interactively modified
3395 backup = 2 # unconditionally do backup
3396 backup = 2 # unconditionally do backup
3396 check = 1 # check if the existing file differs from target
3397 check = 1 # check if the existing file differs from target
3397 discard = 0 # never do backup
3398 discard = 0 # never do backup
3398 if opts.get(b'no_backup'):
3399 if opts.get(b'no_backup'):
3399 backupinteractive = backup = check = discard
3400 backupinteractive = backup = check = discard
3400 if interactive:
3401 if interactive:
3401 dsmodifiedbackup = backupinteractive
3402 dsmodifiedbackup = backupinteractive
3402 else:
3403 else:
3403 dsmodifiedbackup = backup
3404 dsmodifiedbackup = backup
3404 tobackup = set()
3405 tobackup = set()
3405
3406
3406 backupanddel = actions[b'remove']
3407 backupanddel = actions[b'remove']
3407 if not opts.get(b'no_backup'):
3408 if not opts.get(b'no_backup'):
3408 backupanddel = actions[b'drop']
3409 backupanddel = actions[b'drop']
3409
3410
3410 disptable = (
3411 disptable = (
3411 # dispatch table:
3412 # dispatch table:
3412 # file state
3413 # file state
3413 # action
3414 # action
3414 # make backup
3415 # make backup
3415 ## Sets that results that will change file on disk
3416 ## Sets that results that will change file on disk
3416 # Modified compared to target, no local change
3417 # Modified compared to target, no local change
3417 (modified, actions[b'revert'], discard),
3418 (modified, actions[b'revert'], discard),
3418 # Modified compared to target, but local file is deleted
3419 # Modified compared to target, but local file is deleted
3419 (deleted, actions[b'revert'], discard),
3420 (deleted, actions[b'revert'], discard),
3420 # Modified compared to target, local change
3421 # Modified compared to target, local change
3421 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3422 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3422 # Added since target
3423 # Added since target
3423 (added, actions[b'remove'], discard),
3424 (added, actions[b'remove'], discard),
3424 # Added in working directory
3425 # Added in working directory
3425 (dsadded, actions[b'forget'], discard),
3426 (dsadded, actions[b'forget'], discard),
3426 # Added since target, have local modification
3427 # Added since target, have local modification
3427 (modadded, backupanddel, backup),
3428 (modadded, backupanddel, backup),
3428 # Added since target but file is missing in working directory
3429 # Added since target but file is missing in working directory
3429 (deladded, actions[b'drop'], discard),
3430 (deladded, actions[b'drop'], discard),
3430 # Removed since target, before working copy parent
3431 # Removed since target, before working copy parent
3431 (removed, actions[b'add'], discard),
3432 (removed, actions[b'add'], discard),
3432 # Same as `removed` but an unknown file exists at the same path
3433 # Same as `removed` but an unknown file exists at the same path
3433 (removunk, actions[b'add'], check),
3434 (removunk, actions[b'add'], check),
3434 # Removed since targe, marked as such in working copy parent
3435 # Removed since targe, marked as such in working copy parent
3435 (dsremoved, actions[b'undelete'], discard),
3436 (dsremoved, actions[b'undelete'], discard),
3436 # Same as `dsremoved` but an unknown file exists at the same path
3437 # Same as `dsremoved` but an unknown file exists at the same path
3437 (dsremovunk, actions[b'undelete'], check),
3438 (dsremovunk, actions[b'undelete'], check),
3438 ## the following sets does not result in any file changes
3439 ## the following sets does not result in any file changes
3439 # File with no modification
3440 # File with no modification
3440 (clean, actions[b'noop'], discard),
3441 (clean, actions[b'noop'], discard),
3441 # Existing file, not tracked anywhere
3442 # Existing file, not tracked anywhere
3442 (unknown, actions[b'unknown'], discard),
3443 (unknown, actions[b'unknown'], discard),
3443 )
3444 )
3444
3445
3445 for abs, exact in sorted(names.items()):
3446 for abs, exact in sorted(names.items()):
3446 # target file to be touch on disk (relative to cwd)
3447 # target file to be touch on disk (relative to cwd)
3447 target = repo.wjoin(abs)
3448 target = repo.wjoin(abs)
3448 # search the entry in the dispatch table.
3449 # search the entry in the dispatch table.
3449 # if the file is in any of these sets, it was touched in the working
3450 # if the file is in any of these sets, it was touched in the working
3450 # directory parent and we are sure it needs to be reverted.
3451 # directory parent and we are sure it needs to be reverted.
3451 for table, (xlist, msg), dobackup in disptable:
3452 for table, (xlist, msg), dobackup in disptable:
3452 if abs not in table:
3453 if abs not in table:
3453 continue
3454 continue
3454 if xlist is not None:
3455 if xlist is not None:
3455 xlist.append(abs)
3456 xlist.append(abs)
3456 if dobackup:
3457 if dobackup:
3457 # If in interactive mode, don't automatically create
3458 # If in interactive mode, don't automatically create
3458 # .orig files (issue4793)
3459 # .orig files (issue4793)
3459 if dobackup == backupinteractive:
3460 if dobackup == backupinteractive:
3460 tobackup.add(abs)
3461 tobackup.add(abs)
3461 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3462 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3462 absbakname = scmutil.backuppath(ui, repo, abs)
3463 absbakname = scmutil.backuppath(ui, repo, abs)
3463 bakname = os.path.relpath(
3464 bakname = os.path.relpath(
3464 absbakname, start=repo.root
3465 absbakname, start=repo.root
3465 )
3466 )
3466 ui.note(
3467 ui.note(
3467 _(b'saving current version of %s as %s\n')
3468 _(b'saving current version of %s as %s\n')
3468 % (uipathfn(abs), uipathfn(bakname))
3469 % (uipathfn(abs), uipathfn(bakname))
3469 )
3470 )
3470 if not opts.get(b'dry_run'):
3471 if not opts.get(b'dry_run'):
3471 if interactive:
3472 if interactive:
3472 util.copyfile(target, absbakname)
3473 util.copyfile(target, absbakname)
3473 else:
3474 else:
3474 util.rename(target, absbakname)
3475 util.rename(target, absbakname)
3475 if opts.get(b'dry_run'):
3476 if opts.get(b'dry_run'):
3476 if ui.verbose or not exact:
3477 if ui.verbose or not exact:
3477 ui.status(msg % uipathfn(abs))
3478 ui.status(msg % uipathfn(abs))
3478 elif exact:
3479 elif exact:
3479 ui.warn(msg % uipathfn(abs))
3480 ui.warn(msg % uipathfn(abs))
3480 break
3481 break
3481
3482
3482 if not opts.get(b'dry_run'):
3483 if not opts.get(b'dry_run'):
3483 needdata = (b'revert', b'add', b'undelete')
3484 needdata = (b'revert', b'add', b'undelete')
3484 oplist = [actions[name][0] for name in needdata]
3485 oplist = [actions[name][0] for name in needdata]
3485 prefetch = scmutil.prefetchfiles
3486 prefetch = scmutil.prefetchfiles
3486 matchfiles = scmutil.matchfiles(
3487 matchfiles = scmutil.matchfiles(
3487 repo, [f for sublist in oplist for f in sublist]
3488 repo, [f for sublist in oplist for f in sublist]
3488 )
3489 )
3489 prefetch(
3490 prefetch(
3490 repo,
3491 repo,
3491 [(ctx.rev(), matchfiles)],
3492 [(ctx.rev(), matchfiles)],
3492 )
3493 )
3493 match = scmutil.match(repo[None], pats)
3494 match = scmutil.match(repo[None], pats)
3494 _performrevert(
3495 _performrevert(
3495 repo,
3496 repo,
3496 ctx,
3497 ctx,
3497 names,
3498 names,
3498 uipathfn,
3499 uipathfn,
3499 actions,
3500 actions,
3500 match,
3501 match,
3501 interactive,
3502 interactive,
3502 tobackup,
3503 tobackup,
3503 )
3504 )
3504
3505
3505 if targetsubs:
3506 if targetsubs:
3506 # Revert the subrepos on the revert list
3507 # Revert the subrepos on the revert list
3507 for sub in targetsubs:
3508 for sub in targetsubs:
3508 try:
3509 try:
3509 wctx.sub(sub).revert(
3510 wctx.sub(sub).revert(
3510 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3511 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3511 )
3512 )
3512 except KeyError:
3513 except KeyError:
3513 raise error.Abort(
3514 raise error.Abort(
3514 b"subrepository '%s' does not exist in %s!"
3515 b"subrepository '%s' does not exist in %s!"
3515 % (sub, short(ctx.node()))
3516 % (sub, short(ctx.node()))
3516 )
3517 )
3517
3518
3518
3519
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    repo: the local repository
    ctx: the changeset being reverted to
    names: dict of <abs path in repo> -> <exactly specified by matcher?>;
        the boolean decides whether a status message is shown for the file
        (exact matches are silent unless --verbose)
    uipathfn: callable turning an abs repo path into a user-displayable path
    actions: dict mapping an action name (b'revert', b'add', b'remove',
        b'drop', b'forget', b'undelete', ...) to a (file list, message
        template) pair, as built by the caller's dispatch table
    match: matcher for the files being reverted (passed to the interactive
        hunk selection)
    interactive: if true, prompt the user per file / per hunk
    tobackup: set of files that must be backed up before an interactive
        revert modifies them (may be None)
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    excluded_files = []

    def checkout(f):
        # write the target revision's content (and flags) into the working copy
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # unlink from disk (best effort), then mark as removed in the dirstate
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            pass
        repo.dirstate.remove(f)

    def prntstatusmsg(action, f):
        # only announce files the user did not name explicitly (or if verbose)
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        # diff direction depends on whether selected hunks are applied
        # (toward the target) or discarded (back toward the target)
        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # restore copy/rename metadata for every file we (re)materialized
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3717
3718
3718
3719
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl,   destbranch,   destpeer,   outgoing)
summaryremotehooks = util.hooks()
3736
3737
3737
3738
def checkunfinished(repo, commit=False, skipmerge=False):
    """Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().

    commit: allow states whose _allowcommit flag is set
    skipmerge: do not treat an in-progress merge as a blocker
    """
    # Check for non-clearable states first, so things like rebase will take
    # precedence over update.
    for blocker in statemod._unfinishedstates:
        ignorable = (
            blocker._clearable
            or (commit and blocker._allowcommit)
            or blocker._reportonly
        )
        if ignorable:
            continue
        if blocker.isunfinished(repo):
            raise error.StateError(blocker.msg(), hint=blocker.hint())

    # Then look at the clearable states (e.g. an interrupted update).
    for blocker in statemod._unfinishedstates:
        ignorable = (
            not blocker._clearable
            or (commit and blocker._allowcommit)
            or (blocker._opname == b'merge' and skipmerge)
            or blocker._reportonly
        )
        if ignorable:
            continue
        if blocker.isunfinished(repo):
            raise error.StateError(blocker.msg(), hint=blocker.hint())
3765
3766
3766
3767
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable.

    Raises error.StateError when a non-clearable unfinished operation is
    found; otherwise unlinks the state file of every clearable unfinished
    operation (merge is excluded — it is handled elsewhere).
    """
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    for s in statemod._unfinishedstates:
        # BUG FIX: the original tested `state._reportonly`, reusing the
        # leaked loop variable from the loop above (i.e. whatever state
        # happened to iterate last), instead of the current `s`.
        if s._opname == b'merge' or s._reportonly:
            continue
        if s._clearable and s.isunfinished(repo):
            util.unlink(repo.vfs.join(s._fname))
3782
3783
3783
3784
def getunfinishedstate(repo):
    """Checks for unfinished operations and returns statecheck object
    for it.

    Returns None when no unfinished operation is in progress."""
    # Return the first matching state, honoring the declaration order of
    # statemod._unfinishedstates.
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
3791
3792
3792
3793
def howtocontinue(repo):
    """Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates is scanned for an unfinished operation and
    the corresponding "continue" message is built when the operation
    supports continuing.

    Returns a (msg, warning) tuple. 'msg' is a string (or None) and
    'warning' is a boolean.
    """
    contmsg = _(b"continue: %s")
    for state in statemod._unfinishedstates:
        if state._continueflag and state.isunfinished(repo):
            return contmsg % state.continuemsg(), True
    # No interrupted operation: suggest committing when the working
    # directory is dirty.
    if repo[None].dirty(missing=True, merge=False, branch=False):
        return contmsg % _(b"hg commit"), False
    return None, None
3813
3814
3814
3815
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve

    If there's an unfinished operation that supports a continue flag,
    the message goes through repo.ui.warn; otherwise through
    repo.ui.note.
    """
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
3829
3830
3830
3831
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    """
    msg, warning = howtocontinue(repo)
    # Only warn-level suggestions are worth surfacing as a hint.
    hint = msg if warning else None
    raise error.StateError(_(b'no %s in progress') % task, hint=hint)
3845
3846
3846
3847
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and rollbacks to the state before interrupted
    graft"""
    if not graftstate.exists():
        raise error.StateError(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # an old graft state file does not carry enough data to abort
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which the graft operation was started
    startctx = repo[newnodes[0]].p1() if newnodes else repo[b'.']
    # whether it is safe to strip the nodes created by the graft
    safe_to_strip = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        safe_to_strip = True
        # refuse to clean up changesets that turned public (or are public)
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            safe_to_strip = False

        # refuse to strip when new commits exist on top of the grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            safe_to_strip = False

    if safe_to_strip:
        with repo.wlock(), repo.lock():
            mergemod.clean_update(startctx)
            # strip the nodes created by the interrupted graft
            strippoints = [
                c.node() for c in repo.set(b"roots(%ld)", newnodes)
            ]
            repair.strip(repo.ui, repo, strippoints, backup=False)
    else:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3909
3910
3910
3911
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """read the graft state file and return a dict of the data stored in it"""
    try:
        return graftstate.read()
    except error.CorruptedState:
        # Fall back to the legacy plain-text format: one node per line.
        return {b'nodes': repo.vfs.read(b'graftstate').splitlines()}
3919
3920
3920
3921
def hgabortgraft(ui, repo):
    """abort logic for aborting graft using 'hg abort'"""
    # Take the wlock for the whole abort so the state file cannot change
    # under us.
    with repo.wlock():
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
@@ -1,3113 +1,3113 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 dagop,
31 dagop,
32 encoding,
32 encoding,
33 error,
33 error,
34 fileset,
34 fileset,
35 match as matchmod,
35 match as matchmod,
36 mergestate as mergestatemod,
36 mergestate as mergestatemod,
37 metadata,
37 metadata,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 scmutil,
44 scmutil,
45 sparse,
45 sparse,
46 subrepo,
46 subrepo,
47 subrepoutil,
47 subrepoutil,
48 util,
48 util,
49 )
49 )
50 from .utils import (
50 from .utils import (
51 dateutil,
51 dateutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 propertycache = util.propertycache
55 propertycache = util.propertycache
56
56
57
57
58 class basectx(object):
58 class basectx(object):
59 """A basectx object represents the common logic for its children:
59 """A basectx object represents the common logic for its children:
60 changectx: read-only context that is already present in the repo,
60 changectx: read-only context that is already present in the repo,
61 workingctx: a context that represents the working directory and can
61 workingctx: a context that represents the working directory and can
62 be committed,
62 be committed,
63 memctx: a context that represents changes in-memory and can also
63 memctx: a context that represents changes in-memory and can also
64 be committed."""
64 be committed."""
65
65
66 def __init__(self, repo):
66 def __init__(self, repo):
67 self._repo = repo
67 self._repo = repo
68
68
69 def __bytes__(self):
69 def __bytes__(self):
70 return short(self.node())
70 return short(self.node())
71
71
72 __str__ = encoding.strmethod(__bytes__)
72 __str__ = encoding.strmethod(__bytes__)
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _buildstatusmanifest(self, status):
95 def _buildstatusmanifest(self, status):
96 """Builds a manifest that includes the given status results, if this is
96 """Builds a manifest that includes the given status results, if this is
97 a working copy context. For non-working copy contexts, it just returns
97 a working copy context. For non-working copy contexts, it just returns
98 the normal manifest."""
98 the normal manifest."""
99 return self.manifest()
99 return self.manifest()
100
100
101 def _matchstatus(self, other, match):
101 def _matchstatus(self, other, match):
102 """This internal method provides a way for child objects to override the
102 """This internal method provides a way for child objects to override the
103 match operator.
103 match operator.
104 """
104 """
105 return match
105 return match
106
106
107 def _buildstatus(
107 def _buildstatus(
108 self, other, s, match, listignored, listclean, listunknown
108 self, other, s, match, listignored, listclean, listunknown
109 ):
109 ):
110 """build a status with respect to another context"""
110 """build a status with respect to another context"""
111 # Load earliest manifest first for caching reasons. More specifically,
111 # Load earliest manifest first for caching reasons. More specifically,
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
114 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # 1000 and cache it so that when you read 1001, we just need to apply a
115 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta to what's in the cache. So that's one full reconstruction + one
116 # delta application.
116 # delta application.
117 mf2 = None
117 mf2 = None
118 if self.rev() is not None and self.rev() < other.rev():
118 if self.rev() is not None and self.rev() < other.rev():
119 mf2 = self._buildstatusmanifest(s)
119 mf2 = self._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
121 if mf2 is None:
121 if mf2 is None:
122 mf2 = self._buildstatusmanifest(s)
122 mf2 = self._buildstatusmanifest(s)
123
123
124 modified, added = [], []
124 modified, added = [], []
125 removed = []
125 removed = []
126 clean = []
126 clean = []
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 deletedset = set(deleted)
128 deletedset = set(deleted)
129 d = mf1.diff(mf2, match=match, clean=listclean)
129 d = mf1.diff(mf2, match=match, clean=listclean)
130 for fn, value in pycompat.iteritems(d):
130 for fn, value in pycompat.iteritems(d):
131 if fn in deletedset:
131 if fn in deletedset:
132 continue
132 continue
133 if value is None:
133 if value is None:
134 clean.append(fn)
134 clean.append(fn)
135 continue
135 continue
136 (node1, flag1), (node2, flag2) = value
136 (node1, flag1), (node2, flag2) = value
137 if node1 is None:
137 if node1 is None:
138 added.append(fn)
138 added.append(fn)
139 elif node2 is None:
139 elif node2 is None:
140 removed.append(fn)
140 removed.append(fn)
141 elif flag1 != flag2:
141 elif flag1 != flag2:
142 modified.append(fn)
142 modified.append(fn)
143 elif node2 not in wdirfilenodeids:
143 elif node2 not in wdirfilenodeids:
144 # When comparing files between two commits, we save time by
144 # When comparing files between two commits, we save time by
145 # not comparing the file contents when the nodeids differ.
145 # not comparing the file contents when the nodeids differ.
146 # Note that this means we incorrectly report a reverted change
146 # Note that this means we incorrectly report a reverted change
147 # to a file as a modification.
147 # to a file as a modification.
148 modified.append(fn)
148 modified.append(fn)
149 elif self[fn].cmp(other[fn]):
149 elif self[fn].cmp(other[fn]):
150 modified.append(fn)
150 modified.append(fn)
151 else:
151 else:
152 clean.append(fn)
152 clean.append(fn)
153
153
154 if removed:
154 if removed:
155 # need to filter files if they are already reported as removed
155 # need to filter files if they are already reported as removed
156 unknown = [
156 unknown = [
157 fn
157 fn
158 for fn in unknown
158 for fn in unknown
159 if fn not in mf1 and (not match or match(fn))
159 if fn not in mf1 and (not match or match(fn))
160 ]
160 ]
161 ignored = [
161 ignored = [
162 fn
162 fn
163 for fn in ignored
163 for fn in ignored
164 if fn not in mf1 and (not match or match(fn))
164 if fn not in mf1 and (not match or match(fn))
165 ]
165 ]
166 # if they're deleted, don't report them as removed
166 # if they're deleted, don't report them as removed
167 removed = [fn for fn in removed if fn not in deletedset]
167 removed = [fn for fn in removed if fn not in deletedset]
168
168
169 return scmutil.status(
169 return scmutil.status(
170 modified, added, removed, deleted, unknown, ignored, clean
170 modified, added, removed, deleted, unknown, ignored, clean
171 )
171 )
172
172
173 @propertycache
173 @propertycache
174 def substate(self):
174 def substate(self):
175 return subrepoutil.state(self, self._repo.ui)
175 return subrepoutil.state(self, self._repo.ui)
176
176
177 def subrev(self, subpath):
177 def subrev(self, subpath):
178 return self.substate[subpath][1]
178 return self.substate[subpath][1]
179
179
180 def rev(self):
180 def rev(self):
181 return self._rev
181 return self._rev
182
182
183 def node(self):
183 def node(self):
184 return self._node
184 return self._node
185
185
186 def hex(self):
186 def hex(self):
187 return hex(self.node())
187 return hex(self.node())
188
188
189 def manifest(self):
189 def manifest(self):
190 return self._manifest
190 return self._manifest
191
191
192 def manifestctx(self):
192 def manifestctx(self):
193 return self._manifestctx
193 return self._manifestctx
194
194
195 def repo(self):
195 def repo(self):
196 return self._repo
196 return self._repo
197
197
198 def phasestr(self):
198 def phasestr(self):
199 return phases.phasenames[self.phase()]
199 return phases.phasenames[self.phase()]
200
200
201 def mutable(self):
201 def mutable(self):
202 return self.phase() > phases.public
202 return self.phase() > phases.public
203
203
204 def matchfileset(self, cwd, expr, badfn=None):
204 def matchfileset(self, cwd, expr, badfn=None):
205 return fileset.match(self, cwd, expr, badfn=badfn)
205 return fileset.match(self, cwd, expr, badfn=badfn)
206
206
207 def obsolete(self):
207 def obsolete(self):
208 """True if the changeset is obsolete"""
208 """True if the changeset is obsolete"""
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
210
210
211 def extinct(self):
211 def extinct(self):
212 """True if the changeset is extinct"""
212 """True if the changeset is extinct"""
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
214
214
215 def orphan(self):
215 def orphan(self):
216 """True if the changeset is not obsolete, but its ancestor is"""
216 """True if the changeset is not obsolete, but its ancestor is"""
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
218
218
219 def phasedivergent(self):
219 def phasedivergent(self):
220 """True if the changeset tries to be a successor of a public changeset
220 """True if the changeset tries to be a successor of a public changeset
221
221
222 Only non-public and non-obsolete changesets may be phase-divergent.
222 Only non-public and non-obsolete changesets may be phase-divergent.
223 """
223 """
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
225
225
226 def contentdivergent(self):
226 def contentdivergent(self):
227 """Is a successor of a changeset with multiple possible successor sets
227 """Is a successor of a changeset with multiple possible successor sets
228
228
229 Only non-public and non-obsolete changesets may be content-divergent.
229 Only non-public and non-obsolete changesets may be content-divergent.
230 """
230 """
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
232
232
233 def isunstable(self):
233 def isunstable(self):
234 """True if the changeset is either orphan, phase-divergent or
234 """True if the changeset is either orphan, phase-divergent or
235 content-divergent"""
235 content-divergent"""
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
237
237
238 def instabilities(self):
238 def instabilities(self):
239 """return the list of instabilities affecting this changeset.
239 """return the list of instabilities affecting this changeset.
240
240
241 Instabilities are returned as strings. possible values are:
241 Instabilities are returned as strings. possible values are:
242 - orphan,
242 - orphan,
243 - phase-divergent,
243 - phase-divergent,
244 - content-divergent.
244 - content-divergent.
245 """
245 """
246 instabilities = []
246 instabilities = []
247 if self.orphan():
247 if self.orphan():
248 instabilities.append(b'orphan')
248 instabilities.append(b'orphan')
249 if self.phasedivergent():
249 if self.phasedivergent():
250 instabilities.append(b'phase-divergent')
250 instabilities.append(b'phase-divergent')
251 if self.contentdivergent():
251 if self.contentdivergent():
252 instabilities.append(b'content-divergent')
252 instabilities.append(b'content-divergent')
253 return instabilities
253 return instabilities
254
254
255 def parents(self):
255 def parents(self):
256 """return contexts for each parent changeset"""
256 """return contexts for each parent changeset"""
257 return self._parents
257 return self._parents
258
258
259 def p1(self):
259 def p1(self):
260 return self._parents[0]
260 return self._parents[0]
261
261
262 def p2(self):
262 def p2(self):
263 parents = self._parents
263 parents = self._parents
264 if len(parents) == 2:
264 if len(parents) == 2:
265 return parents[1]
265 return parents[1]
266 return self._repo[nullrev]
266 return self._repo[nullrev]
267
267
268 def _fileinfo(self, path):
268 def _fileinfo(self, path):
269 if '_manifest' in self.__dict__:
269 if '_manifest' in self.__dict__:
270 try:
270 try:
271 return self._manifest.find(path)
271 return self._manifest.find(path)
272 except KeyError:
272 except KeyError:
273 raise error.ManifestLookupError(
273 raise error.ManifestLookupError(
274 self._node or b'None', path, _(b'not found in manifest')
274 self._node or b'None', path, _(b'not found in manifest')
275 )
275 )
276 if '_manifestdelta' in self.__dict__ or path in self.files():
276 if '_manifestdelta' in self.__dict__ or path in self.files():
277 if path in self._manifestdelta:
277 if path in self._manifestdelta:
278 return (
278 return (
279 self._manifestdelta[path],
279 self._manifestdelta[path],
280 self._manifestdelta.flags(path),
280 self._manifestdelta.flags(path),
281 )
281 )
282 mfl = self._repo.manifestlog
282 mfl = self._repo.manifestlog
283 try:
283 try:
284 node, flag = mfl[self._changeset.manifest].find(path)
284 node, flag = mfl[self._changeset.manifest].find(path)
285 except KeyError:
285 except KeyError:
286 raise error.ManifestLookupError(
286 raise error.ManifestLookupError(
287 self._node or b'None', path, _(b'not found in manifest')
287 self._node or b'None', path, _(b'not found in manifest')
288 )
288 )
289
289
290 return node, flag
290 return node, flag
291
291
292 def filenode(self, path):
292 def filenode(self, path):
293 return self._fileinfo(path)[0]
293 return self._fileinfo(path)[0]
294
294
295 def flags(self, path):
295 def flags(self, path):
296 try:
296 try:
297 return self._fileinfo(path)[1]
297 return self._fileinfo(path)[1]
298 except error.LookupError:
298 except error.LookupError:
299 return b''
299 return b''
300
300
301 @propertycache
301 @propertycache
302 def _copies(self):
302 def _copies(self):
303 return metadata.computechangesetcopies(self)
303 return metadata.computechangesetcopies(self)
304
304
305 def p1copies(self):
305 def p1copies(self):
306 return self._copies[0]
306 return self._copies[0]
307
307
308 def p2copies(self):
308 def p2copies(self):
309 return self._copies[1]
309 return self._copies[1]
310
310
311 def sub(self, path, allowcreate=True):
311 def sub(self, path, allowcreate=True):
312 '''return a subrepo for the stored revision of path, never wdir()'''
312 '''return a subrepo for the stored revision of path, never wdir()'''
313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
314
314
315 def nullsub(self, path, pctx):
315 def nullsub(self, path, pctx):
316 return subrepo.nullsubrepo(self, path, pctx)
316 return subrepo.nullsubrepo(self, path, pctx)
317
317
318 def workingsub(self, path):
318 def workingsub(self, path):
319 """return a subrepo for the stored revision, or wdir if this is a wdir
319 """return a subrepo for the stored revision, or wdir if this is a wdir
320 context.
320 context.
321 """
321 """
322 return subrepo.subrepo(self, path, allowwdir=True)
322 return subrepo.subrepo(self, path, allowwdir=True)
323
323
324 def match(
324 def match(
325 self,
325 self,
326 pats=None,
326 pats=None,
327 include=None,
327 include=None,
328 exclude=None,
328 exclude=None,
329 default=b'glob',
329 default=b'glob',
330 listsubrepos=False,
330 listsubrepos=False,
331 badfn=None,
331 badfn=None,
332 cwd=None,
332 cwd=None,
333 ):
333 ):
334 r = self._repo
334 r = self._repo
335 if not cwd:
335 if not cwd:
336 cwd = r.getcwd()
336 cwd = r.getcwd()
337 return matchmod.match(
337 return matchmod.match(
338 r.root,
338 r.root,
339 cwd,
339 cwd,
340 pats,
340 pats,
341 include,
341 include,
342 exclude,
342 exclude,
343 default,
343 default,
344 auditor=r.nofsauditor,
344 auditor=r.nofsauditor,
345 ctx=self,
345 ctx=self,
346 listsubrepos=listsubrepos,
346 listsubrepos=listsubrepos,
347 badfn=badfn,
347 badfn=badfn,
348 )
348 )
349
349
350 def diff(
350 def diff(
351 self,
351 self,
352 ctx2=None,
352 ctx2=None,
353 match=None,
353 match=None,
354 changes=None,
354 changes=None,
355 opts=None,
355 opts=None,
356 losedatafn=None,
356 losedatafn=None,
357 pathfn=None,
357 pathfn=None,
358 copy=None,
358 copy=None,
359 copysourcematch=None,
359 copysourcematch=None,
360 hunksfilterfn=None,
360 hunksfilterfn=None,
361 ):
361 ):
362 """Returns a diff generator for the given contexts and matcher"""
362 """Returns a diff generator for the given contexts and matcher"""
363 if ctx2 is None:
363 if ctx2 is None:
364 ctx2 = self.p1()
364 ctx2 = self.p1()
365 if ctx2 is not None:
365 if ctx2 is not None:
366 ctx2 = self._repo[ctx2]
366 ctx2 = self._repo[ctx2]
367 return patch.diff(
367 return patch.diff(
368 self._repo,
368 self._repo,
369 ctx2,
369 ctx2,
370 self,
370 self,
371 match=match,
371 match=match,
372 changes=changes,
372 changes=changes,
373 opts=opts,
373 opts=opts,
374 losedatafn=losedatafn,
374 losedatafn=losedatafn,
375 pathfn=pathfn,
375 pathfn=pathfn,
376 copy=copy,
376 copy=copy,
377 copysourcematch=copysourcematch,
377 copysourcematch=copysourcematch,
378 hunksfilterfn=hunksfilterfn,
378 hunksfilterfn=hunksfilterfn,
379 )
379 )
380
380
381 def dirs(self):
381 def dirs(self):
382 return self._manifest.dirs()
382 return self._manifest.dirs()
383
383
384 def hasdir(self, dir):
384 def hasdir(self, dir):
385 return self._manifest.hasdir(dir)
385 return self._manifest.hasdir(dir)
386
386
387 def status(
387 def status(
388 self,
388 self,
389 other=None,
389 other=None,
390 match=None,
390 match=None,
391 listignored=False,
391 listignored=False,
392 listclean=False,
392 listclean=False,
393 listunknown=False,
393 listunknown=False,
394 listsubrepos=False,
394 listsubrepos=False,
395 ):
395 ):
396 """return status of files between two nodes or node and working
396 """return status of files between two nodes or node and working
397 directory.
397 directory.
398
398
399 If other is None, compare this node with working directory.
399 If other is None, compare this node with working directory.
400
400
401 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
401 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
402
402
403 Returns a mercurial.scmutils.status object.
403 Returns a mercurial.scmutils.status object.
404
404
405 Data can be accessed using either tuple notation:
405 Data can be accessed using either tuple notation:
406
406
407 (modified, added, removed, deleted, unknown, ignored, clean)
407 (modified, added, removed, deleted, unknown, ignored, clean)
408
408
409 or direct attribute access:
409 or direct attribute access:
410
410
411 s.modified, s.added, ...
411 s.modified, s.added, ...
412 """
412 """
413
413
414 ctx1 = self
414 ctx1 = self
415 ctx2 = self._repo[other]
415 ctx2 = self._repo[other]
416
416
417 # This next code block is, admittedly, fragile logic that tests for
417 # This next code block is, admittedly, fragile logic that tests for
418 # reversing the contexts and wouldn't need to exist if it weren't for
418 # reversing the contexts and wouldn't need to exist if it weren't for
419 # the fast (and common) code path of comparing the working directory
419 # the fast (and common) code path of comparing the working directory
420 # with its first parent.
420 # with its first parent.
421 #
421 #
422 # What we're aiming for here is the ability to call:
422 # What we're aiming for here is the ability to call:
423 #
423 #
424 # workingctx.status(parentctx)
424 # workingctx.status(parentctx)
425 #
425 #
426 # If we always built the manifest for each context and compared those,
426 # If we always built the manifest for each context and compared those,
427 # then we'd be done. But the special case of the above call means we
427 # then we'd be done. But the special case of the above call means we
428 # just copy the manifest of the parent.
428 # just copy the manifest of the parent.
429 reversed = False
429 reversed = False
430 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
430 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
431 reversed = True
431 reversed = True
432 ctx1, ctx2 = ctx2, ctx1
432 ctx1, ctx2 = ctx2, ctx1
433
433
434 match = self._repo.narrowmatch(match)
434 match = self._repo.narrowmatch(match)
435 match = ctx2._matchstatus(ctx1, match)
435 match = ctx2._matchstatus(ctx1, match)
436 r = scmutil.status([], [], [], [], [], [], [])
436 r = scmutil.status([], [], [], [], [], [], [])
437 r = ctx2._buildstatus(
437 r = ctx2._buildstatus(
438 ctx1, r, match, listignored, listclean, listunknown
438 ctx1, r, match, listignored, listclean, listunknown
439 )
439 )
440
440
441 if reversed:
441 if reversed:
442 # Reverse added and removed. Clear deleted, unknown and ignored as
442 # Reverse added and removed. Clear deleted, unknown and ignored as
443 # these make no sense to reverse.
443 # these make no sense to reverse.
444 r = scmutil.status(
444 r = scmutil.status(
445 r.modified, r.removed, r.added, [], [], [], r.clean
445 r.modified, r.removed, r.added, [], [], [], r.clean
446 )
446 )
447
447
448 if listsubrepos:
448 if listsubrepos:
449 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
449 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
450 try:
450 try:
451 rev2 = ctx2.subrev(subpath)
451 rev2 = ctx2.subrev(subpath)
452 except KeyError:
452 except KeyError:
453 # A subrepo that existed in node1 was deleted between
453 # A subrepo that existed in node1 was deleted between
454 # node1 and node2 (inclusive). Thus, ctx2's substate
454 # node1 and node2 (inclusive). Thus, ctx2's substate
455 # won't contain that subpath. The best we can do ignore it.
455 # won't contain that subpath. The best we can do ignore it.
456 rev2 = None
456 rev2 = None
457 submatch = matchmod.subdirmatcher(subpath, match)
457 submatch = matchmod.subdirmatcher(subpath, match)
458 s = sub.status(
458 s = sub.status(
459 rev2,
459 rev2,
460 match=submatch,
460 match=submatch,
461 ignored=listignored,
461 ignored=listignored,
462 clean=listclean,
462 clean=listclean,
463 unknown=listunknown,
463 unknown=listunknown,
464 listsubrepos=True,
464 listsubrepos=True,
465 )
465 )
466 for k in (
466 for k in (
467 'modified',
467 'modified',
468 'added',
468 'added',
469 'removed',
469 'removed',
470 'deleted',
470 'deleted',
471 'unknown',
471 'unknown',
472 'ignored',
472 'ignored',
473 'clean',
473 'clean',
474 ):
474 ):
475 rfiles, sfiles = getattr(r, k), getattr(s, k)
475 rfiles, sfiles = getattr(r, k), getattr(s, k)
476 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
476 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
477
477
478 r.modified.sort()
478 r.modified.sort()
479 r.added.sort()
479 r.added.sort()
480 r.removed.sort()
480 r.removed.sort()
481 r.deleted.sort()
481 r.deleted.sort()
482 r.unknown.sort()
482 r.unknown.sort()
483 r.ignored.sort()
483 r.ignored.sort()
484 r.clean.sort()
484 r.clean.sort()
485
485
486 return r
486 return r
487
487
488 def mergestate(self, clean=False):
488 def mergestate(self, clean=False):
489 """Get a mergestate object for this context."""
489 """Get a mergestate object for this context."""
490 raise NotImplementedError(
490 raise NotImplementedError(
491 '%s does not implement mergestate()' % self.__class__
491 '%s does not implement mergestate()' % self.__class__
492 )
492 )
493
493
494 def isempty(self):
494 def isempty(self):
495 return not (
495 return not (
496 len(self.parents()) > 1
496 len(self.parents()) > 1
497 or self.branch() != self.p1().branch()
497 or self.branch() != self.p1().branch()
498 or self.closesbranch()
498 or self.closesbranch()
499 or self.files()
499 or self.files()
500 )
500 )
501
501
502
502
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog
        # must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # Hash on the revision number when it is set; fall back to object
        # identity for partially constructed instances.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Use the unfiltered changelog when the revision is known unfiltered.
        repo = self._repo if self._maybe_filtered else self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        # Parents of an already-checked revision are known unfiltered too.
        parents = [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        if p2 != nullrev:
            parents.append(
                changectx(repo, p2, cl.node(p2), maybe_filtered=False)
            )
        return parents

    def changeset(self):
        """Return the raw changeset fields as a 6-tuple."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        """Files touched by this changeset that were neither added nor
        removed, as a sorted list."""
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def _files_from_changeset(self, recorded, compute):
        """Shared policy for filesadded()/filesremoved().

        ``recorded`` is the value stored in the changeset (possibly None);
        ``compute`` recomputes the value from the repository when the
        configuration allows falling back to filelog data.
        """
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                recorded = None
        if recorded is None:
            recorded = compute(self) if compute_on_none else []
        return recorded

    def filesadded(self):
        return self._files_from_changeset(
            self._changeset.filesadded, metadata.computechangesetfilesadded
        )

    def filesremoved(self):
        return self._files_from_changeset(
            self._changeset.filesremoved, metadata.computechangesetfilesremoved
        )

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        child_nodes = self._repo.changelog.children(self._node)
        return [self._repo[node] for node in child_nodes]

    def ancestors(self):
        for rev in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[rev]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for rev in self._repo.changelog.descendants([self._rev]):
            yield self._repo[rev]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
793
793
794
794
795 class basefilectx(object):
795 class basefilectx(object):
796 """A filecontext object represents the common logic for its children:
796 """A filecontext object represents the common logic for its children:
797 filectx: read-only access to a filerevision that is already present
797 filectx: read-only access to a filerevision that is already present
798 in the repo,
798 in the repo,
799 workingfilectx: a filecontext that represents files from the working
799 workingfilectx: a filecontext that represents files from the working
800 directory,
800 directory,
801 memfilectx: a filecontext that represents files in-memory,
801 memfilectx: a filecontext that represents files in-memory,
802 """
802 """
803
803
804 @propertycache
804 @propertycache
805 def _filelog(self):
805 def _filelog(self):
806 return self._repo.file(self._path)
806 return self._repo.file(self._path)
807
807
808 @propertycache
808 @propertycache
809 def _changeid(self):
809 def _changeid(self):
810 if '_changectx' in self.__dict__:
810 if '_changectx' in self.__dict__:
811 return self._changectx.rev()
811 return self._changectx.rev()
812 elif '_descendantrev' in self.__dict__:
812 elif '_descendantrev' in self.__dict__:
813 # this file context was created from a revision with a known
813 # this file context was created from a revision with a known
814 # descendant, we can (lazily) correct for linkrev aliases
814 # descendant, we can (lazily) correct for linkrev aliases
815 return self._adjustlinkrev(self._descendantrev)
815 return self._adjustlinkrev(self._descendantrev)
816 else:
816 else:
817 return self._filelog.linkrev(self._filerev)
817 return self._filelog.linkrev(self._filerev)
818
818
819 @propertycache
819 @propertycache
820 def _filenode(self):
820 def _filenode(self):
821 if '_fileid' in self.__dict__:
821 if '_fileid' in self.__dict__:
822 return self._filelog.lookup(self._fileid)
822 return self._filelog.lookup(self._fileid)
823 else:
823 else:
824 return self._changectx.filenode(self._path)
824 return self._changectx.filenode(self._path)
825
825
826 @propertycache
826 @propertycache
827 def _filerev(self):
827 def _filerev(self):
828 return self._filelog.rev(self._filenode)
828 return self._filelog.rev(self._filenode)
829
829
830 @propertycache
830 @propertycache
831 def _repopath(self):
831 def _repopath(self):
832 return self._path
832 return self._path
833
833
834 def __nonzero__(self):
834 def __nonzero__(self):
835 try:
835 try:
836 self._filenode
836 self._filenode
837 return True
837 return True
838 except error.LookupError:
838 except error.LookupError:
839 # file is missing
839 # file is missing
840 return False
840 return False
841
841
842 __bool__ = __nonzero__
842 __bool__ = __nonzero__
843
843
844 def __bytes__(self):
844 def __bytes__(self):
845 try:
845 try:
846 return b"%s@%s" % (self.path(), self._changectx)
846 return b"%s@%s" % (self.path(), self._changectx)
847 except error.LookupError:
847 except error.LookupError:
848 return b"%s@???" % self.path()
848 return b"%s@???" % self.path()
849
849
850 __str__ = encoding.strmethod(__bytes__)
850 __str__ = encoding.strmethod(__bytes__)
851
851
852 def __repr__(self):
852 def __repr__(self):
853 return "<%s %s>" % (type(self).__name__, str(self))
853 return "<%s %s>" % (type(self).__name__, str(self))
854
854
855 def __hash__(self):
855 def __hash__(self):
856 try:
856 try:
857 return hash((self._path, self._filenode))
857 return hash((self._path, self._filenode))
858 except AttributeError:
858 except AttributeError:
859 return id(self)
859 return id(self)
860
860
861 def __eq__(self, other):
861 def __eq__(self, other):
862 try:
862 try:
863 return (
863 return (
864 type(self) == type(other)
864 type(self) == type(other)
865 and self._path == other._path
865 and self._path == other._path
866 and self._filenode == other._filenode
866 and self._filenode == other._filenode
867 )
867 )
868 except AttributeError:
868 except AttributeError:
869 return False
869 return False
870
870
871 def __ne__(self, other):
871 def __ne__(self, other):
872 return not (self == other)
872 return not (self == other)
873
873
874 def filerev(self):
874 def filerev(self):
875 return self._filerev
875 return self._filerev
876
876
877 def filenode(self):
877 def filenode(self):
878 return self._filenode
878 return self._filenode
879
879
880 @propertycache
880 @propertycache
881 def _flags(self):
881 def _flags(self):
882 return self._changectx.flags(self._path)
882 return self._changectx.flags(self._path)
883
883
884 def flags(self):
884 def flags(self):
885 return self._flags
885 return self._flags
886
886
887 def filelog(self):
887 def filelog(self):
888 return self._filelog
888 return self._filelog
889
889
890 def rev(self):
890 def rev(self):
891 return self._changeid
891 return self._changeid
892
892
893 def linkrev(self):
893 def linkrev(self):
894 return self._filelog.linkrev(self._filerev)
894 return self._filelog.linkrev(self._filerev)
895
895
896 def node(self):
896 def node(self):
897 return self._changectx.node()
897 return self._changectx.node()
898
898
899 def hex(self):
899 def hex(self):
900 return self._changectx.hex()
900 return self._changectx.hex()
901
901
902 def user(self):
902 def user(self):
903 return self._changectx.user()
903 return self._changectx.user()
904
904
905 def date(self):
905 def date(self):
906 return self._changectx.date()
906 return self._changectx.date()
907
907
908 def files(self):
908 def files(self):
909 return self._changectx.files()
909 return self._changectx.files()
910
910
911 def description(self):
911 def description(self):
912 return self._changectx.description()
912 return self._changectx.description()
913
913
914 def branch(self):
914 def branch(self):
915 return self._changectx.branch()
915 return self._changectx.branch()
916
916
917 def extra(self):
917 def extra(self):
918 return self._changectx.extra()
918 return self._changectx.extra()
919
919
920 def phase(self):
920 def phase(self):
921 return self._changectx.phase()
921 return self._changectx.phase()
922
922
923 def phasestr(self):
923 def phasestr(self):
924 return self._changectx.phasestr()
924 return self._changectx.phasestr()
925
925
926 def obsolete(self):
926 def obsolete(self):
927 return self._changectx.obsolete()
927 return self._changectx.obsolete()
928
928
929 def instabilities(self):
929 def instabilities(self):
930 return self._changectx.instabilities()
930 return self._changectx.instabilities()
931
931
932 def manifest(self):
932 def manifest(self):
933 return self._changectx.manifest()
933 return self._changectx.manifest()
934
934
935 def changectx(self):
935 def changectx(self):
936 return self._changectx
936 return self._changectx
937
937
938 def renamed(self):
938 def renamed(self):
939 return self._copied
939 return self._copied
940
940
941 def copysource(self):
941 def copysource(self):
942 return self._copied and self._copied[0]
942 return self._copied and self._copied[0]
943
943
944 def repo(self):
944 def repo(self):
945 return self._repo
945 return self._repo
946
946
947 def size(self):
947 def size(self):
948 return len(self.data())
948 return len(self.data())
949
949
950 def path(self):
950 def path(self):
951 return self._path
951 return self._path
952
952
953 def isbinary(self):
953 def isbinary(self):
954 try:
954 try:
955 return stringutil.binary(self.data())
955 return stringutil.binary(self.data())
956 except IOError:
956 except IOError:
957 return False
957 return False
958
958
959 def isexec(self):
959 def isexec(self):
960 return b'x' in self.flags()
960 return b'x' in self.flags()
961
961
962 def islink(self):
962 def islink(self):
963 return b'l' in self.flags()
963 return b'l' in self.flags()
964
964
965 def isabsent(self):
965 def isabsent(self):
966 """whether this filectx represents a file not in self._changectx
966 """whether this filectx represents a file not in self._changectx
967
967
968 This is mainly for merge code to detect change/delete conflicts. This is
968 This is mainly for merge code to detect change/delete conflicts. This is
969 expected to be True for all subclasses of basectx."""
969 expected to be True for all subclasses of basectx."""
970 return False
970 return False
971
971
972 _customcmp = False
972 _customcmp = False
973
973
974 def cmp(self, fctx):
974 def cmp(self, fctx):
975 """compare with other file context
975 """compare with other file context
976
976
977 returns True if different than fctx.
977 returns True if different than fctx.
978 """
978 """
979 if fctx._customcmp:
979 if fctx._customcmp:
980 return fctx.cmp(self)
980 return fctx.cmp(self)
981
981
982 if self._filenode is None:
982 if self._filenode is None:
983 raise error.ProgrammingError(
983 raise error.ProgrammingError(
984 b'filectx.cmp() must be reimplemented if not backed by revlog'
984 b'filectx.cmp() must be reimplemented if not backed by revlog'
985 )
985 )
986
986
987 if fctx._filenode is None:
987 if fctx._filenode is None:
988 if self._repo._encodefilterpats:
988 if self._repo._encodefilterpats:
989 # can't rely on size() because wdir content may be decoded
989 # can't rely on size() because wdir content may be decoded
990 return self._filelog.cmp(self._filenode, fctx.data())
990 return self._filelog.cmp(self._filenode, fctx.data())
991 if self.size() - 4 == fctx.size():
991 if self.size() - 4 == fctx.size():
992 # size() can match:
992 # size() can match:
993 # if file data starts with '\1\n', empty metadata block is
993 # if file data starts with '\1\n', empty metadata block is
994 # prepended, which adds 4 bytes to filelog.size().
994 # prepended, which adds 4 bytes to filelog.size().
995 return self._filelog.cmp(self._filenode, fctx.data())
995 return self._filelog.cmp(self._filenode, fctx.data())
996 if self.size() == fctx.size() or self.flags() == b'l':
996 if self.size() == fctx.size() or self.flags() == b'l':
997 # size() matches: need to compare content
997 # size() matches: need to compare content
998 # issue6456: Always compare symlinks because size can represent
998 # issue6456: Always compare symlinks because size can represent
999 # encrypted string for EXT-4 encryption(fscrypt).
999 # encrypted string for EXT-4 encryption(fscrypt).
1000 return self._filelog.cmp(self._filenode, fctx.data())
1000 return self._filelog.cmp(self._filenode, fctx.data())
1001
1001
1002 # size() differs
1002 # size() differs
1003 return True
1003 return True
1004
1004
1005 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1005 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1006 """return the first ancestor of <srcrev> introducing <fnode>
1006 """return the first ancestor of <srcrev> introducing <fnode>
1007
1007
1008 If the linkrev of the file revision does not point to an ancestor of
1008 If the linkrev of the file revision does not point to an ancestor of
1009 srcrev, we'll walk down the ancestors until we find one introducing
1009 srcrev, we'll walk down the ancestors until we find one introducing
1010 this file revision.
1010 this file revision.
1011
1011
1012 :srcrev: the changeset revision we search ancestors from
1012 :srcrev: the changeset revision we search ancestors from
1013 :inclusive: if true, the src revision will also be checked
1013 :inclusive: if true, the src revision will also be checked
1014 :stoprev: an optional revision to stop the walk at. If no introduction
1014 :stoprev: an optional revision to stop the walk at. If no introduction
1015 of this file content could be found before this floor
1015 of this file content could be found before this floor
1016 revision, the function will returns "None" and stops its
1016 revision, the function will returns "None" and stops its
1017 iteration.
1017 iteration.
1018 """
1018 """
1019 repo = self._repo
1019 repo = self._repo
1020 cl = repo.unfiltered().changelog
1020 cl = repo.unfiltered().changelog
1021 mfl = repo.manifestlog
1021 mfl = repo.manifestlog
1022 # fetch the linkrev
1022 # fetch the linkrev
1023 lkr = self.linkrev()
1023 lkr = self.linkrev()
1024 if srcrev == lkr:
1024 if srcrev == lkr:
1025 return lkr
1025 return lkr
1026 # hack to reuse ancestor computation when searching for renames
1026 # hack to reuse ancestor computation when searching for renames
1027 memberanc = getattr(self, '_ancestrycontext', None)
1027 memberanc = getattr(self, '_ancestrycontext', None)
1028 iteranc = None
1028 iteranc = None
1029 if srcrev is None:
1029 if srcrev is None:
1030 # wctx case, used by workingfilectx during mergecopy
1030 # wctx case, used by workingfilectx during mergecopy
1031 revs = [p.rev() for p in self._repo[None].parents()]
1031 revs = [p.rev() for p in self._repo[None].parents()]
1032 inclusive = True # we skipped the real (revless) source
1032 inclusive = True # we skipped the real (revless) source
1033 else:
1033 else:
1034 revs = [srcrev]
1034 revs = [srcrev]
1035 if memberanc is None:
1035 if memberanc is None:
1036 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1036 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1037 # check if this linkrev is an ancestor of srcrev
1037 # check if this linkrev is an ancestor of srcrev
1038 if lkr not in memberanc:
1038 if lkr not in memberanc:
1039 if iteranc is None:
1039 if iteranc is None:
1040 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1040 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1041 fnode = self._filenode
1041 fnode = self._filenode
1042 path = self._path
1042 path = self._path
1043 for a in iteranc:
1043 for a in iteranc:
1044 if stoprev is not None and a < stoprev:
1044 if stoprev is not None and a < stoprev:
1045 return None
1045 return None
1046 ac = cl.read(a) # get changeset data (we avoid object creation)
1046 ac = cl.read(a) # get changeset data (we avoid object creation)
1047 if path in ac[3]: # checking the 'files' field.
1047 if path in ac[3]: # checking the 'files' field.
1048 # The file has been touched, check if the content is
1048 # The file has been touched, check if the content is
1049 # similar to the one we search for.
1049 # similar to the one we search for.
1050 if fnode == mfl[ac[0]].readfast().get(path):
1050 if fnode == mfl[ac[0]].readfast().get(path):
1051 return a
1051 return a
1052 # In theory, we should never get out of that loop without a result.
1052 # In theory, we should never get out of that loop without a result.
1053 # But if manifest uses a buggy file revision (not children of the
1053 # But if manifest uses a buggy file revision (not children of the
1054 # one it replaces) we could. Such a buggy situation will likely
1054 # one it replaces) we could. Such a buggy situation will likely
1055 # result is crash somewhere else at to some point.
1055 # result is crash somewhere else at to some point.
1056 return lkr
1056 return lkr
1057
1057
1058 def isintroducedafter(self, changelogrev):
1058 def isintroducedafter(self, changelogrev):
1059 """True if a filectx has been introduced after a given floor revision"""
1059 """True if a filectx has been introduced after a given floor revision"""
1060 if self.linkrev() >= changelogrev:
1060 if self.linkrev() >= changelogrev:
1061 return True
1061 return True
1062 introrev = self._introrev(stoprev=changelogrev)
1062 introrev = self._introrev(stoprev=changelogrev)
1063 if introrev is None:
1063 if introrev is None:
1064 return False
1064 return False
1065 return introrev >= changelogrev
1065 return introrev >= changelogrev
1066
1066
1067 def introrev(self):
1067 def introrev(self):
1068 """return the rev of the changeset which introduced this file revision
1068 """return the rev of the changeset which introduced this file revision
1069
1069
1070 This method is different from linkrev because it take into account the
1070 This method is different from linkrev because it take into account the
1071 changeset the filectx was created from. It ensures the returned
1071 changeset the filectx was created from. It ensures the returned
1072 revision is one of its ancestors. This prevents bugs from
1072 revision is one of its ancestors. This prevents bugs from
1073 'linkrev-shadowing' when a file revision is used by multiple
1073 'linkrev-shadowing' when a file revision is used by multiple
1074 changesets.
1074 changesets.
1075 """
1075 """
1076 return self._introrev()
1076 return self._introrev()
1077
1077
1078 def _introrev(self, stoprev=None):
1078 def _introrev(self, stoprev=None):
1079 """
1079 """
1080 Same as `introrev` but, with an extra argument to limit changelog
1080 Same as `introrev` but, with an extra argument to limit changelog
1081 iteration range in some internal usecase.
1081 iteration range in some internal usecase.
1082
1082
1083 If `stoprev` is set, the `introrev` will not be searched past that
1083 If `stoprev` is set, the `introrev` will not be searched past that
1084 `stoprev` revision and "None" might be returned. This is useful to
1084 `stoprev` revision and "None" might be returned. This is useful to
1085 limit the iteration range.
1085 limit the iteration range.
1086 """
1086 """
1087 toprev = None
1087 toprev = None
1088 attrs = vars(self)
1088 attrs = vars(self)
1089 if '_changeid' in attrs:
1089 if '_changeid' in attrs:
1090 # We have a cached value already
1090 # We have a cached value already
1091 toprev = self._changeid
1091 toprev = self._changeid
1092 elif '_changectx' in attrs:
1092 elif '_changectx' in attrs:
1093 # We know which changelog entry we are coming from
1093 # We know which changelog entry we are coming from
1094 toprev = self._changectx.rev()
1094 toprev = self._changectx.rev()
1095
1095
1096 if toprev is not None:
1096 if toprev is not None:
1097 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1097 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1098 elif '_descendantrev' in attrs:
1098 elif '_descendantrev' in attrs:
1099 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1099 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1100 # be nice and cache the result of the computation
1100 # be nice and cache the result of the computation
1101 if introrev is not None:
1101 if introrev is not None:
1102 self._changeid = introrev
1102 self._changeid = introrev
1103 return introrev
1103 return introrev
1104 else:
1104 else:
1105 return self.linkrev()
1105 return self.linkrev()
1106
1106
1107 def introfilectx(self):
1107 def introfilectx(self):
1108 """Return filectx having identical contents, but pointing to the
1108 """Return filectx having identical contents, but pointing to the
1109 changeset revision where this filectx was introduced"""
1109 changeset revision where this filectx was introduced"""
1110 introrev = self.introrev()
1110 introrev = self.introrev()
1111 if self.rev() == introrev:
1111 if self.rev() == introrev:
1112 return self
1112 return self
1113 return self.filectx(self.filenode(), changeid=introrev)
1113 return self.filectx(self.filenode(), changeid=introrev)
1114
1114
1115 def _parentfilectx(self, path, fileid, filelog):
1115 def _parentfilectx(self, path, fileid, filelog):
1116 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1116 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1117 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1117 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1118 if '_changeid' in vars(self) or '_changectx' in vars(self):
1118 if '_changeid' in vars(self) or '_changectx' in vars(self):
1119 # If self is associated with a changeset (probably explicitly
1119 # If self is associated with a changeset (probably explicitly
1120 # fed), ensure the created filectx is associated with a
1120 # fed), ensure the created filectx is associated with a
1121 # changeset that is an ancestor of self.changectx.
1121 # changeset that is an ancestor of self.changectx.
1122 # This lets us later use _adjustlinkrev to get a correct link.
1122 # This lets us later use _adjustlinkrev to get a correct link.
1123 fctx._descendantrev = self.rev()
1123 fctx._descendantrev = self.rev()
1124 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1124 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1125 elif '_descendantrev' in vars(self):
1125 elif '_descendantrev' in vars(self):
1126 # Otherwise propagate _descendantrev if we have one associated.
1126 # Otherwise propagate _descendantrev if we have one associated.
1127 fctx._descendantrev = self._descendantrev
1127 fctx._descendantrev = self._descendantrev
1128 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1128 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1129 return fctx
1129 return fctx
1130
1130
1131 def parents(self):
1131 def parents(self):
1132 _path = self._path
1132 _path = self._path
1133 fl = self._filelog
1133 fl = self._filelog
1134 parents = self._filelog.parents(self._filenode)
1134 parents = self._filelog.parents(self._filenode)
1135 pl = [(_path, node, fl) for node in parents if node != nullid]
1135 pl = [(_path, node, fl) for node in parents if node != nullid]
1136
1136
1137 r = fl.renamed(self._filenode)
1137 r = fl.renamed(self._filenode)
1138 if r:
1138 if r:
1139 # - In the simple rename case, both parent are nullid, pl is empty.
1139 # - In the simple rename case, both parent are nullid, pl is empty.
1140 # - In case of merge, only one of the parent is null id and should
1140 # - In case of merge, only one of the parent is null id and should
1141 # be replaced with the rename information. This parent is -always-
1141 # be replaced with the rename information. This parent is -always-
1142 # the first one.
1142 # the first one.
1143 #
1143 #
1144 # As null id have always been filtered out in the previous list
1144 # As null id have always been filtered out in the previous list
1145 # comprehension, inserting to 0 will always result in "replacing
1145 # comprehension, inserting to 0 will always result in "replacing
1146 # first nullid parent with rename information.
1146 # first nullid parent with rename information.
1147 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1147 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1148
1148
1149 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1150
1150
1151 def p1(self):
1151 def p1(self):
1152 return self.parents()[0]
1152 return self.parents()[0]
1153
1153
1154 def p2(self):
1154 def p2(self):
1155 p = self.parents()
1155 p = self.parents()
1156 if len(p) == 2:
1156 if len(p) == 2:
1157 return p[1]
1157 return p[1]
1158 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1159
1159
1160 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1160 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1161 """Returns a list of annotateline objects for each line in the file
1161 """Returns a list of annotateline objects for each line in the file
1162
1162
1163 - line.fctx is the filectx of the node where that line was last changed
1163 - line.fctx is the filectx of the node where that line was last changed
1164 - line.lineno is the line number at the first appearance in the managed
1164 - line.lineno is the line number at the first appearance in the managed
1165 file
1165 file
1166 - line.text is the data on that line (including newline character)
1166 - line.text is the data on that line (including newline character)
1167 """
1167 """
1168 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1168 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1169
1169
1170 def parents(f):
1170 def parents(f):
1171 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1171 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1172 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1172 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1173 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1173 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1174 # isn't an ancestor of the srcrev.
1174 # isn't an ancestor of the srcrev.
1175 f._changeid
1175 f._changeid
1176 pl = f.parents()
1176 pl = f.parents()
1177
1177
1178 # Don't return renamed parents if we aren't following.
1178 # Don't return renamed parents if we aren't following.
1179 if not follow:
1179 if not follow:
1180 pl = [p for p in pl if p.path() == f.path()]
1180 pl = [p for p in pl if p.path() == f.path()]
1181
1181
1182 # renamed filectx won't have a filelog yet, so set it
1182 # renamed filectx won't have a filelog yet, so set it
1183 # from the cache to save time
1183 # from the cache to save time
1184 for p in pl:
1184 for p in pl:
1185 if not '_filelog' in p.__dict__:
1185 if not '_filelog' in p.__dict__:
1186 p._filelog = getlog(p.path())
1186 p._filelog = getlog(p.path())
1187
1187
1188 return pl
1188 return pl
1189
1189
1190 # use linkrev to find the first changeset where self appeared
1190 # use linkrev to find the first changeset where self appeared
1191 base = self.introfilectx()
1191 base = self.introfilectx()
1192 if getattr(base, '_ancestrycontext', None) is None:
1192 if getattr(base, '_ancestrycontext', None) is None:
1193 # it is safe to use an unfiltered repository here because we are
1193 # it is safe to use an unfiltered repository here because we are
1194 # walking ancestors only.
1194 # walking ancestors only.
1195 cl = self._repo.unfiltered().changelog
1195 cl = self._repo.unfiltered().changelog
1196 if base.rev() is None:
1196 if base.rev() is None:
1197 # wctx is not inclusive, but works because _ancestrycontext
1197 # wctx is not inclusive, but works because _ancestrycontext
1198 # is used to test filelog revisions
1198 # is used to test filelog revisions
1199 ac = cl.ancestors(
1199 ac = cl.ancestors(
1200 [p.rev() for p in base.parents()], inclusive=True
1200 [p.rev() for p in base.parents()], inclusive=True
1201 )
1201 )
1202 else:
1202 else:
1203 ac = cl.ancestors([base.rev()], inclusive=True)
1203 ac = cl.ancestors([base.rev()], inclusive=True)
1204 base._ancestrycontext = ac
1204 base._ancestrycontext = ac
1205
1205
1206 return dagop.annotate(
1206 return dagop.annotate(
1207 base, parents, skiprevs=skiprevs, diffopts=diffopts
1207 base, parents, skiprevs=skiprevs, diffopts=diffopts
1208 )
1208 )
1209
1209
1210 def ancestors(self, followfirst=False):
1210 def ancestors(self, followfirst=False):
1211 visit = {}
1211 visit = {}
1212 c = self
1212 c = self
1213 if followfirst:
1213 if followfirst:
1214 cut = 1
1214 cut = 1
1215 else:
1215 else:
1216 cut = None
1216 cut = None
1217
1217
1218 while True:
1218 while True:
1219 for parent in c.parents()[:cut]:
1219 for parent in c.parents()[:cut]:
1220 visit[(parent.linkrev(), parent.filenode())] = parent
1220 visit[(parent.linkrev(), parent.filenode())] = parent
1221 if not visit:
1221 if not visit:
1222 break
1222 break
1223 c = visit.pop(max(visit))
1223 c = visit.pop(max(visit))
1224 yield c
1224 yield c
1225
1225
1226 def decodeddata(self):
1226 def decodeddata(self):
1227 """Returns `data()` after running repository decoding filters.
1227 """Returns `data()` after running repository decoding filters.
1228
1228
1229 This is often equivalent to how the data would be expressed on disk.
1229 This is often equivalent to how the data would be expressed on disk.
1230 """
1230 """
1231 return self._repo.wwritedata(self.path(), self.data())
1231 return self._repo.wwritedata(self.path(), self.data())
1232
1232
1233
1233
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        # Seed only the caches we were actually handed; the remaining
        # attributes are computed lazily through propertycache.
        if filelog is not None:
            self._filelog = filelog
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is either silently elided or a hard error,
            # depending on the censor.policy configuration
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for parent in self._changectx.parents():
            try:
                if fnode == parent.filenode(name):
                    # both parents already carry this file revision: not a
                    # copy from this changeset's point of view
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        childnodes = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=n, filelog=self._filelog)
            for n in childnodes
        ]
1358
1358
1359
1359
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # Pin only the values that were explicitly provided; everything
        # else falls back to the propertycache defaults below.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # render as "<first parent>+" to mark the uncommitted state
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        status = self._status
        return sorted(status.modified + status.added + status.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        marks = []
        for parent in self.parents():
            marks.extend(parent.bookmarks())
        return marks

    def phase(self):
        # a commit can never be in an earlier phase than any of its parents
        phase = phases.newcommitphase(self._repo.ui)
        for parent in self.parents():
            phase = max(phase, parent.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        # punt on two parents for now
        return self._parents[0].ancestor(c2)

    def ancestors(self):
        for parent in self._parents:
            yield parent
        for rev in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[rev]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1524
1524
1525
1525
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
        or None to use the repository status.
    """

    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # Read the branch from the dirstate unless the caller supplied
        # one explicitly through ``extra``.
        branch = None
        if not (extra and b'branch' in extra):
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1548
1548
1549 def __iter__(self):
1549 def __iter__(self):
1550 d = self._repo.dirstate
1550 d = self._repo.dirstate
1551 for f in d:
1551 for f in d:
1552 if d[f] != b'r':
1552 if d[f] != b'r':
1553 yield f
1553 yield f
1554
1554
1555 def __contains__(self, key):
1555 def __contains__(self, key):
1556 return self._repo.dirstate[key] not in b"?r"
1556 return self._repo.dirstate[key] not in b"?r"
1557
1557
1558 def hex(self):
1558 def hex(self):
1559 return wdirhex
1559 return wdirhex
1560
1560
    @propertycache
    def _parents(self):
        # The working directory's parents as changectx objects.
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # Drop a null second parent so a non-merge state yields a
            # single-entry list.
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1574
1574
1575 def setparents(self, p1node, p2node=nullid):
1575 def setparents(self, p1node, p2node=nullid):
1576 dirstate = self._repo.dirstate
1576 dirstate = self._repo.dirstate
1577 with dirstate.parentchange():
1577 with dirstate.parentchange():
1578 copies = dirstate.setparents(p1node, p2node)
1578 copies = dirstate.setparents(p1node, p2node)
1579 pctx = self._repo[p1node]
1579 pctx = self._repo[p1node]
1580 if copies:
1580 if copies:
1581 # Adjust copy records, the dirstate cannot do it, it
1581 # Adjust copy records, the dirstate cannot do it, it
1582 # requires access to parents manifests. Preserve them
1582 # requires access to parents manifests. Preserve them
1583 # only for entries added to first parent.
1583 # only for entries added to first parent.
1584 for f in copies:
1584 for f in copies:
1585 if f not in pctx and copies[f] in pctx:
1585 if f not in pctx and copies[f] in pctx:
1586 dirstate.copy(copies[f], f)
1586 dirstate.copy(copies[f], f)
1587 if p2node == nullid:
1587 if p2node == nullid:
1588 for f, s in sorted(dirstate.copies().items()):
1588 for f, s in sorted(dirstate.copies().items()):
1589 if f not in pctx and s not in pctx:
1589 if f not in pctx and s not in pctx:
1590 dirstate.copy(None, f)
1590 dirstate.copy(None, f)
1591
1591
1592 def _fileinfo(self, path):
1592 def _fileinfo(self, path):
1593 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1593 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1594 self._manifest
1594 self._manifest
1595 return super(workingctx, self)._fileinfo(path)
1595 return super(workingctx, self)._fileinfo(path)
1596
1596
1597 def _buildflagfunc(self):
1597 def _buildflagfunc(self):
1598 # Create a fallback function for getting file flags when the
1598 # Create a fallback function for getting file flags when the
1599 # filesystem doesn't support them
1599 # filesystem doesn't support them
1600
1600
1601 copiesget = self._repo.dirstate.copies().get
1601 copiesget = self._repo.dirstate.copies().get
1602 parents = self.parents()
1602 parents = self.parents()
1603 if len(parents) < 2:
1603 if len(parents) < 2:
1604 # when we have one parent, it's easy: copy from parent
1604 # when we have one parent, it's easy: copy from parent
1605 man = parents[0].manifest()
1605 man = parents[0].manifest()
1606
1606
1607 def func(f):
1607 def func(f):
1608 f = copiesget(f, f)
1608 f = copiesget(f, f)
1609 return man.flags(f)
1609 return man.flags(f)
1610
1610
1611 else:
1611 else:
1612 # merges are tricky: we try to reconstruct the unstored
1612 # merges are tricky: we try to reconstruct the unstored
1613 # result from the merge (issue1802)
1613 # result from the merge (issue1802)
1614 p1, p2 = parents
1614 p1, p2 = parents
1615 pa = p1.ancestor(p2)
1615 pa = p1.ancestor(p2)
1616 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1616 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1617
1617
1618 def func(f):
1618 def func(f):
1619 f = copiesget(f, f) # may be wrong for merges with copies
1619 f = copiesget(f, f) # may be wrong for merges with copies
1620 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1620 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1621 if fl1 == fl2:
1621 if fl1 == fl2:
1622 return fl1
1622 return fl1
1623 if fl1 == fla:
1623 if fl1 == fla:
1624 return fl2
1624 return fl2
1625 if fl2 == fla:
1625 if fl2 == fla:
1626 return fl1
1626 return fl1
1627 return b'' # punt for conflicts
1627 return b'' # punt for conflicts
1628
1628
1629 return func
1629 return func
1630
1630
    @propertycache
    def _flagfunc(self):
        # Ask the dirstate for a flags(path) callable; it falls back to
        # the function built by _buildflagfunc() on filesystems without
        # native exec/symlink support.
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1634
1634
1635 def flags(self, path):
1635 def flags(self, path):
1636 try:
1636 try:
1637 return self._flagfunc(path)
1637 return self._flagfunc(path)
1638 except OSError:
1638 except OSError:
1639 return b''
1639 return b''
1640
1640
1641 def filectx(self, path, filelog=None):
1641 def filectx(self, path, filelog=None):
1642 """get a file context from the working directory"""
1642 """get a file context from the working directory"""
1643 return workingfilectx(
1643 return workingfilectx(
1644 self._repo, path, workingctx=self, filelog=filelog
1644 self._repo, path, workingctx=self, filelog=filelog
1645 )
1645 )
1646
1646
1647 def dirty(self, missing=False, merge=True, branch=True):
1647 def dirty(self, missing=False, merge=True, branch=True):
1648 """check whether a working directory is modified"""
1648 """check whether a working directory is modified"""
1649 # check subrepos first
1649 # check subrepos first
1650 for s in sorted(self.substate):
1650 for s in sorted(self.substate):
1651 if self.sub(s).dirty(missing=missing):
1651 if self.sub(s).dirty(missing=missing):
1652 return True
1652 return True
1653 # check current working dir
1653 # check current working dir
1654 return (
1654 return (
1655 (merge and self.p2())
1655 (merge and self.p2())
1656 or (branch and self.branch() != self.p1().branch())
1656 or (branch and self.branch() != self.p1().branch())
1657 or self.modified()
1657 or self.modified()
1658 or self.added()
1658 or self.added()
1659 or self.removed()
1659 or self.removed()
1660 or (missing and self.deleted())
1660 or (missing and self.deleted())
1661 )
1661 )
1662
1662
1663 def add(self, list, prefix=b""):
1663 def add(self, list, prefix=b""):
1664 with self._repo.wlock():
1664 with self._repo.wlock():
1665 ui, ds = self._repo.ui, self._repo.dirstate
1665 ui, ds = self._repo.ui, self._repo.dirstate
1666 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1666 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1667 rejected = []
1667 rejected = []
1668 lstat = self._repo.wvfs.lstat
1668 lstat = self._repo.wvfs.lstat
1669 for f in list:
1669 for f in list:
1670 # ds.pathto() returns an absolute file when this is invoked from
1670 # ds.pathto() returns an absolute file when this is invoked from
1671 # the keyword extension. That gets flagged as non-portable on
1671 # the keyword extension. That gets flagged as non-portable on
1672 # Windows, since it contains the drive letter and colon.
1672 # Windows, since it contains the drive letter and colon.
1673 scmutil.checkportable(ui, os.path.join(prefix, f))
1673 scmutil.checkportable(ui, os.path.join(prefix, f))
1674 try:
1674 try:
1675 st = lstat(f)
1675 st = lstat(f)
1676 except OSError:
1676 except OSError:
1677 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1677 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1678 rejected.append(f)
1678 rejected.append(f)
1679 continue
1679 continue
1680 limit = ui.configbytes(b'ui', b'large-file-limit')
1680 limit = ui.configbytes(b'ui', b'large-file-limit')
1681 if limit != 0 and st.st_size > limit:
1681 if limit != 0 and st.st_size > limit:
1682 ui.warn(
1682 ui.warn(
1683 _(
1683 _(
1684 b"%s: up to %d MB of RAM may be required "
1684 b"%s: up to %d MB of RAM may be required "
1685 b"to manage this file\n"
1685 b"to manage this file\n"
1686 b"(use 'hg revert %s' to cancel the "
1686 b"(use 'hg revert %s' to cancel the "
1687 b"pending addition)\n"
1687 b"pending addition)\n"
1688 )
1688 )
1689 % (f, 3 * st.st_size // 1000000, uipath(f))
1689 % (f, 3 * st.st_size // 1000000, uipath(f))
1690 )
1690 )
1691 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1691 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1692 ui.warn(
1692 ui.warn(
1693 _(
1693 _(
1694 b"%s not added: only files and symlinks "
1694 b"%s not added: only files and symlinks "
1695 b"supported currently\n"
1695 b"supported currently\n"
1696 )
1696 )
1697 % uipath(f)
1697 % uipath(f)
1698 )
1698 )
1699 rejected.append(f)
1699 rejected.append(f)
1700 elif ds[f] in b'amn':
1700 elif ds[f] in b'amn':
1701 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1701 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1702 elif ds[f] == b'r':
1702 elif ds[f] == b'r':
1703 ds.normallookup(f)
1703 ds.normallookup(f)
1704 else:
1704 else:
1705 ds.add(f)
1705 ds.add(f)
1706 return rejected
1706 return rejected
1707
1707
1708 def forget(self, files, prefix=b""):
1708 def forget(self, files, prefix=b""):
1709 with self._repo.wlock():
1709 with self._repo.wlock():
1710 ds = self._repo.dirstate
1710 ds = self._repo.dirstate
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 rejected = []
1712 rejected = []
1713 for f in files:
1713 for f in files:
1714 if f not in ds:
1714 if f not in ds:
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 rejected.append(f)
1716 rejected.append(f)
1717 elif ds[f] != b'a':
1717 elif ds[f] != b'a':
1718 ds.remove(f)
1718 ds.remove(f)
1719 else:
1719 else:
1720 ds.drop(f)
1720 ds.drop(f)
1721 return rejected
1721 return rejected
1722
1722
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` is a copy of ``source``."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # Only a missing destination is tolerated (with a warning);
            # any other stat failure propagates.
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    # Untracked destination: start tracking it.
                    ds.add(dest)
                elif ds[dest] in b'r':
                    # Scheduled for removal: bring it back first.
                    ds.normallookup(dest)
                ds.copy(source, dest)
1746
1746
1747 def match(
1747 def match(
1748 self,
1748 self,
1749 pats=None,
1749 pats=None,
1750 include=None,
1750 include=None,
1751 exclude=None,
1751 exclude=None,
1752 default=b'glob',
1752 default=b'glob',
1753 listsubrepos=False,
1753 listsubrepos=False,
1754 badfn=None,
1754 badfn=None,
1755 cwd=None,
1755 cwd=None,
1756 ):
1756 ):
1757 r = self._repo
1757 r = self._repo
1758 if not cwd:
1758 if not cwd:
1759 cwd = r.getcwd()
1759 cwd = r.getcwd()
1760
1760
1761 # Only a case insensitive filesystem needs magic to translate user input
1761 # Only a case insensitive filesystem needs magic to translate user input
1762 # to actual case in the filesystem.
1762 # to actual case in the filesystem.
1763 icasefs = not util.fscasesensitive(r.root)
1763 icasefs = not util.fscasesensitive(r.root)
1764 return matchmod.match(
1764 return matchmod.match(
1765 r.root,
1765 r.root,
1766 cwd,
1766 cwd,
1767 pats,
1767 pats,
1768 include,
1768 include,
1769 exclude,
1769 exclude,
1770 default,
1770 default,
1771 auditor=r.auditor,
1771 auditor=r.auditor,
1772 ctx=self,
1772 ctx=self,
1773 listsubrepos=listsubrepos,
1773 listsubrepos=listsubrepos,
1774 badfn=badfn,
1774 badfn=badfn,
1775 icasefs=icasefs,
1775 icasefs=icasefs,
1776 )
1776 )
1777
1777
1778 def _filtersuspectsymlink(self, files):
1778 def _filtersuspectsymlink(self, files):
1779 if not files or self._repo.dirstate._checklink:
1779 if not files or self._repo.dirstate._checklink:
1780 return files
1780 return files
1781
1781
1782 # Symlink placeholders may get non-symlink-like contents
1782 # Symlink placeholders may get non-symlink-like contents
1783 # via user error or dereferencing by NFS or Samba servers,
1783 # via user error or dereferencing by NFS or Samba servers,
1784 # so we filter out any placeholders that don't look like a
1784 # so we filter out any placeholders that don't look like a
1785 # symlink
1785 # symlink
1786 sane = []
1786 sane = []
1787 for f in files:
1787 for f in files:
1788 if self.flags(f) == b'l':
1788 if self.flags(f) == b'l':
1789 d = self[f].data()
1789 d = self[f].data()
1790 if (
1790 if (
1791 d == b''
1791 d == b''
1792 or len(d) >= 1024
1792 or len(d) >= 1024
1793 or b'\n' in d
1793 or b'\n' in d
1794 or stringutil.binary(d)
1794 or stringutil.binary(d)
1795 ):
1795 ):
1796 self._repo.ui.debug(
1796 self._repo.ui.debug(
1797 b'ignoring suspect symlink placeholder "%s"\n' % f
1797 b'ignoring suspect symlink placeholder "%s"\n' % f
1798 )
1798 )
1799 continue
1799 continue
1800 sane.append(f)
1800 sane.append(f)
1801 return sane
1801 return sane
1802
1802
1803 def _checklookup(self, files):
1803 def _checklookup(self, files):
1804 # check for any possibly clean files
1804 # check for any possibly clean files
1805 if not files:
1805 if not files:
1806 return [], [], []
1806 return [], [], []
1807
1807
1808 modified = []
1808 modified = []
1809 deleted = []
1809 deleted = []
1810 fixup = []
1810 fixup = []
1811 pctx = self._parents[0]
1811 pctx = self._parents[0]
1812 # do a full compare of any files that might have changed
1812 # do a full compare of any files that might have changed
1813 for f in sorted(files):
1813 for f in sorted(files):
1814 try:
1814 try:
1815 # This will return True for a file that got replaced by a
1815 # This will return True for a file that got replaced by a
1816 # directory in the interim, but fixing that is pretty hard.
1816 # directory in the interim, but fixing that is pretty hard.
1817 if (
1817 if (
1818 f not in pctx
1818 f not in pctx
1819 or self.flags(f) != pctx.flags(f)
1819 or self.flags(f) != pctx.flags(f)
1820 or pctx[f].cmp(self[f])
1820 or pctx[f].cmp(self[f])
1821 ):
1821 ):
1822 modified.append(f)
1822 modified.append(f)
1823 else:
1823 else:
1824 fixup.append(f)
1824 fixup.append(f)
1825 except (IOError, OSError):
1825 except (IOError, OSError):
1826 # A file become inaccessible in between? Mark it as deleted,
1826 # A file become inaccessible in between? Mark it as deleted,
1827 # matching dirstate behavior (issue5584).
1827 # matching dirstate behavior (issue5584).
1828 # The dirstate has more complex behavior around whether a
1828 # The dirstate has more complex behavior around whether a
1829 # missing file matches a directory, etc, but we don't need to
1829 # missing file matches a directory, etc, but we don't need to
1830 # bother with that: if f has made it to this point, we're sure
1830 # bother with that: if f has made it to this point, we're sure
1831 # it's in the dirstate.
1831 # it's in the dirstate.
1832 deleted.append(f)
1832 deleted.append(f)
1833
1833
1834 return modified, deleted, fixup
1834 return modified, deleted, fixup
1835
1835
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # Remember the dirstate identity so we can detect a
                # concurrent rewrite after taking the lock.
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # Best effort: skipping the fixup is always safe.
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1876
1876
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # ``cmp`` holds files the dirstate could not classify by stat
        # data alone; they need a content comparison below.
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            # Files proven clean by content comparison count as clean.
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1909
1909
    @propertycache
    def _copies(self):
        # Split the dirstate's copy records into per-parent {dst: src}
        # mappings, keeping only destinations actually changed in the
        # working directory and visible through the narrow match.
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            # Attribute the copy to whichever parent knows the source.
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
1927
1927
1928 @propertycache
1928 @propertycache
1929 def _manifest(self):
1929 def _manifest(self):
1930 """generate a manifest corresponding to the values in self._status
1930 """generate a manifest corresponding to the values in self._status
1931
1931
1932 This reuse the file nodeid from parent, but we use special node
1932 This reuse the file nodeid from parent, but we use special node
1933 identifiers for added and modified files. This is used by manifests
1933 identifiers for added and modified files. This is used by manifests
1934 merge to see that files are different and by update logic to avoid
1934 merge to see that files are different and by update logic to avoid
1935 deleting newly added files.
1935 deleting newly added files.
1936 """
1936 """
1937 return self._buildstatusmanifest(self._status)
1937 return self._buildstatusmanifest(self._status)
1938
1938
1939 def _buildstatusmanifest(self, status):
1939 def _buildstatusmanifest(self, status):
1940 """Builds a manifest that includes the given status results."""
1940 """Builds a manifest that includes the given status results."""
1941 parents = self.parents()
1941 parents = self.parents()
1942
1942
1943 man = parents[0].manifest().copy()
1943 man = parents[0].manifest().copy()
1944
1944
1945 ff = self._flagfunc
1945 ff = self._flagfunc
1946 for i, l in (
1946 for i, l in (
1947 (addednodeid, status.added),
1947 (addednodeid, status.added),
1948 (modifiednodeid, status.modified),
1948 (modifiednodeid, status.modified),
1949 ):
1949 ):
1950 for f in l:
1950 for f in l:
1951 man[f] = i
1951 man[f] = i
1952 try:
1952 try:
1953 man.setflag(f, ff(f))
1953 man.setflag(f, ff(f))
1954 except OSError:
1954 except OSError:
1955 pass
1955 pass
1956
1956
1957 for f in status.deleted + status.removed:
1957 for f in status.deleted + status.removed:
1958 if f in man:
1958 if f in man:
1959 del man[f]
1959 del man[f]
1960
1960
1961 return man
1961 return man
1962
1962
1963 def _buildstatus(
1963 def _buildstatus(
1964 self, other, s, match, listignored, listclean, listunknown
1964 self, other, s, match, listignored, listclean, listunknown
1965 ):
1965 ):
1966 """build a status with respect to another context
1966 """build a status with respect to another context
1967
1967
1968 This includes logic for maintaining the fast path of status when
1968 This includes logic for maintaining the fast path of status when
1969 comparing the working directory against its parent, which is to skip
1969 comparing the working directory against its parent, which is to skip
1970 building a new manifest if self (working directory) is not comparing
1970 building a new manifest if self (working directory) is not comparing
1971 against its parent (repo['.']).
1971 against its parent (repo['.']).
1972 """
1972 """
1973 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1973 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1974 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1974 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1975 # might have accidentally ended up with the entire contents of the file
1975 # might have accidentally ended up with the entire contents of the file
1976 # they are supposed to be linking to.
1976 # they are supposed to be linking to.
1977 s.modified[:] = self._filtersuspectsymlink(s.modified)
1977 s.modified[:] = self._filtersuspectsymlink(s.modified)
1978 if other != self._repo[b'.']:
1978 if other != self._repo[b'.']:
1979 s = super(workingctx, self)._buildstatus(
1979 s = super(workingctx, self)._buildstatus(
1980 other, s, match, listignored, listclean, listunknown
1980 other, s, match, listignored, listclean, listunknown
1981 )
1981 )
1982 return s
1982 return s
1983
1983
1984 def _matchstatus(self, other, match):
1984 def _matchstatus(self, other, match):
1985 """override the match method with a filter for directory patterns
1985 """override the match method with a filter for directory patterns
1986
1986
1987 We use inheritance to customize the match.bad method only in cases of
1987 We use inheritance to customize the match.bad method only in cases of
1988 workingctx since it belongs only to the working directory when
1988 workingctx since it belongs only to the working directory when
1989 comparing against the parent changeset.
1989 comparing against the parent changeset.
1990
1990
1991 If we aren't comparing against the working directory's parent, then we
1991 If we aren't comparing against the working directory's parent, then we
1992 just use the default match object sent to us.
1992 just use the default match object sent to us.
1993 """
1993 """
1994 if other != self._repo[b'.']:
1994 if other != self._repo[b'.']:
1995
1995
1996 def bad(f, msg):
1996 def bad(f, msg):
1997 # 'f' may be a directory pattern from 'match.files()',
1997 # 'f' may be a directory pattern from 'match.files()',
1998 # so 'f not in ctx1' is not enough
1998 # so 'f not in ctx1' is not enough
1999 if f not in other and not other.hasdir(f):
1999 if f not in other and not other.hasdir(f):
2000 self._repo.ui.warn(
2000 self._repo.ui.warn(
2001 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2001 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2002 )
2002 )
2003
2003
2004 match.bad = bad
2004 match.bad = bad
2005 return match
2005 return match
2006
2006
2007 def walk(self, match):
2007 def walk(self, match):
2008 '''Generates matching file names.'''
2008 '''Generates matching file names.'''
2009 return sorted(
2009 return sorted(
2010 self._repo.dirstate.walk(
2010 self._repo.dirstate.walk(
2011 self._repo.narrowmatch(match),
2011 self._repo.narrowmatch(match),
2012 subrepos=sorted(self.substate),
2012 subrepos=sorted(self.substate),
2013 unknown=True,
2013 unknown=True,
2014 ignored=False,
2014 ignored=False,
2015 )
2015 )
2016 )
2016 )
2017
2017
2018 def matches(self, match):
2018 def matches(self, match):
2019 match = self._repo.narrowmatch(match)
2019 match = self._repo.narrowmatch(match)
2020 ds = self._repo.dirstate
2020 ds = self._repo.dirstate
2021 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2021 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2022
2022
    def markcommitted(self, node):
        # Flip the dirstate so it reflects that this working context has
        # just been committed as ``node``.
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            # Cached changeid lookups ('.', 'tip', ...) are now stale.
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2038
2038
2039 def mergestate(self, clean=False):
2039 def mergestate(self, clean=False):
2040 if clean:
2040 if clean:
2041 return mergestatemod.mergestate.clean(self._repo)
2041 return mergestatemod.mergestate.clean(self._repo)
2042 return mergestatemod.mergestate.read(self._repo)
2042 return mergestatemod.mergestate.read(self._repo)
2043
2043
2044
2044
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # Not yet committed: there is no changelog revision or filelog node.
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None. The node is nullid when the source is absent from p1."""
        source = self.copysource()
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def parents(self):
        """Return parent filectxs, following copies if necessary."""

        def nodefor(ctx, name):
            # nullid signals "absent from that parent's manifest"
            return ctx._manifest.get(name, nullid)

        name = self._path
        filelog = self._filelog
        ctxparents = self._changectx._parents
        rename = self.renamed()

        if rename:
            candidates = [rename + (None,)]
        else:
            candidates = [(name, nodefor(ctxparents[0], name), filelog)]
        candidates.extend(
            (name, nodefor(p, name), filelog) for p in ctxparents[1:]
        )

        return [
            self._parentfilectx(fname, fileid=fnode, filelog=flog)
            for fname, fnode, flog in candidates
            if fnode != nullid
        ]

    def children(self):
        # An uncommitted file has no committed descendants.
        return []
2102
2102
2103
2103
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # Lazily bind to a fresh workingctx when one was not supplied.
        return workingctx(self._repo)

    def data(self):
        """Read the file's current contents from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source for this file, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx's timestamp
        when the file no longer exists in the working directory."""
        ctxtime, tzoffset = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path)[stat.ST_MTIME]
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (ctxtime, tzoffset)
        return (mtime, tzoffset)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        pruneemptydirs = self._repo.ui.configbool(
            b'experimental', b'removeemptydirs'
        )
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=pruneemptydirs
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        target = self._path
        wvfs.audit(target)
        checkconflicts = self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        )
        if checkconflicts:
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(target) and not wvfs.islink(target):
                wvfs.rmtree(target, forcibly=True)
            for ancestor in reversed(list(pathutil.finddirs(target))):
                if wvfs.isfileorlink(ancestor):
                    wvfs.unlink(ancestor)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(target) and not wvfs.islink(target):
                wvfs.removedirs(target)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2196
2196
2197
2197
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # clean() initializes the write-back cache and the merge state.
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay wraps (it becomes the sole parent)."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=nullid):
        """Set both parents; p1 must stay the wrapped context's node."""
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return ``path``'s data from the cache, falling back to the
        wrapped context when there is no cache entry or no cached data."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # A dirty entry with exists=False records a deletion.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Start from p1's manifest and overlay the cached adds, edits and
        # removals recorded in self._cache.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # Flag lookups consult only the cache; callers must pass dirty paths
        # (added()/modified() in _manifest satisfy this by construction).
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Return every path touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty, still existing, and present in the parent -> modified.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # Dirty, existing, but absent from the parent -> added.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # Dirty, marked non-existing, but present in the parent -> removed.
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Return {dest: source} for cached copies within the narrowspec."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        """Return {dest: source} for cached copies within the narrowspec."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        # Distinguishes this in-memory overlay from an on-disk workingctx.
        return True

    def filedate(self, path):
        """Return the cached date for ``path``, or the wrapped context's."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record in the cache that ``path`` was copied from ``origin``."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the cached copy source of ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Return ``path``'s flags from the cache, else the wrapped ctx."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                # A dirty entry with exists=False records a deletion.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # The cache takes precedence: a dirty entry may record a deletion,
        # in which case membership is False even if p1 has the file.
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Cache a write of ``data`` with ``flags`` to ``path``."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        """Cache new flags (b'l', b'x' or b'') for ``path``."""
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Cache a deletion of ``path``."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        """Return the size of ``path``'s cached or underlying data."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                # A dirty entry with exists=False records a deletion.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx amending ``precursor``, reusing its metadata."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        """True if ``path`` has a cache entry (including deletions)."""
        return path in self._cache

    def clean(self):
        """Discard all cached writes and any in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        # Delete outside the scan to avoid mutating while iterating.
        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        """Record (or overwrite) the cache entry for ``path``."""
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # The cached manifest no longer reflects the cache contents.
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        """Return a file context backed by this overlay's cache."""
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        """Return (lazily creating) the in-memory merge state."""
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2613
2613
2614
2614
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # the overlayworkingctx holding the actual cached file data; every
        # accessor below delegates to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ (opposite convention from filecmp.cmp)
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing on disk to audit for an in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # writes land in the parent context's cache, never on disk
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk state, so there are no unknown files to clear
        pass
2669
2669
2670
2670
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # anything tracked that is not part of this commit is reported
            # as clean, regardless of its real working-directory state
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2713
2713
2714
2714
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: try the memo first, compute and remember on a miss
        # (``None`` results are cached too, matching a removed file)
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2730
2730
2731
2731
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # mirror the source file's content, flags and copy metadata into an
        # in-memory file context
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx
2753
2753
2754
2754
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch removed this file; None tells memctx it's gone
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2777
2777
2778
2778
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # not committed yet, so no revision number or node id
        self._rev = None
        self._node = None
        # normalize missing parents (None) to the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not tracked by either parent: newly added
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content change
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # hide a null second parent, like regular context objects do
        if self._parents[1].node() == nullid:
            return [self._parents[0]]
        return self._parents
2909
2909
2910
2910
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode link/exec state the same way manifests do: 'l', 'x' or ''
        if islink:
            self._flags = b'l'
        elif isexec:
            self._flags = b'x'
        else:
            self._flags = b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True when contents differ
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # flags are not stored here: only the data is kept in memory
        self._data = data
2961
2961
2962
2962
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # not committed yet, so no revision number or node id
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            # pad with the null revision so _parents always has length 2
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1``/``p2`` are context objects while ``nullid`` is
        # a raw node, so these inequalities appear to hold unconditionally and
        # the manifest comparison runs even for null parents — confirm whether
        # ``p1.node() != nullid`` was intended here.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node id of the manifest being reused from ``originalctx``
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged; delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by neither parent: newly added
                added.append(f)
            elif f in self:
                # still present in this context: content change
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3071
3071
3072
3072
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's contents differ from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = b'l' in self.flags() or b'l' in fctx.flags()
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files never report link/exec flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # read the raw bytes directly from disk
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # flags are unsupported for arbitrary on-disk files
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now