##// END OF EJS Templates
narrow: move remaining narrow-limited dirstate walks to core...
Martin von Zweigbergk -
r40442:1d09ba0d default
parent child Browse files
Show More
@@ -1,16 +1,24 b''
1 1 Integration with the share extension needs improvement. Right now
2 2 we've seen some odd bugs.
3 3
4 4 Address commentary in manifest.excludedmanifestrevlog.add -
5 5 specifically we should improve the collaboration with core so that
6 6 add() never gets called on an excluded directory and we can improve
7 7 the stand-in to raise a ProgrammingError.
8 8
9 9 Reason more completely about rename-filtering logic in
10 10 narrowfilelog. There could be some surprises lurking there.
11 11
12 12 Formally document the narrowspec format. For bonus points, unify with the
13 13 server-specified narrowspec format.
14 14
15 15 narrowrepo.setnarrowpats() or narrowspec.save() need to make sure
16 16 they're holding the wlock.
17
18 The following places do an unrestricted dirstate walk (including files outside the
19 narrowspec). Some of them should perhaps not do that.
20
21 * debugfileset
22 * perfwalk
23 * sparse (but restricted to sparse config)
24 * largefiles
@@ -1,70 +1,63 b''
1 1 # narrowdirstate.py - extensions to mercurial dirstate to support narrow clones
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import (
12 12 error,
13 13 )
14 14
15 15 def wrapdirstate(repo, dirstate):
16 16 """Add narrow spec dirstate ignore, block changes outside narrow spec."""
17 17
18 18 def _editfunc(fn):
19 19 def _wrapper(self, *args):
20 20 narrowmatch = repo.narrowmatch()
21 21 for f in args:
22 22 if f is not None and not narrowmatch(f) and f not in self:
23 23 raise error.Abort(_("cannot track '%s' - it is outside " +
24 24 "the narrow clone") % f)
25 25 return fn(self, *args)
26 26 return _wrapper
27 27
28 28 class narrowdirstate(dirstate.__class__):
29 def walk(self, match, subrepos, unknown, ignored, full=True,
30 narrowonly=True):
31 if narrowonly:
32 match = repo.narrowmatch(match, includeexact=True)
33 return super(narrowdirstate, self).walk(match, subrepos, unknown,
34 ignored, full)
35
36 29 # Prevent adding/editing/copying/deleting files that are outside the
37 30 # sparse checkout
38 31 @_editfunc
39 32 def normal(self, *args):
40 33 return super(narrowdirstate, self).normal(*args)
41 34
42 35 @_editfunc
43 36 def add(self, *args):
44 37 return super(narrowdirstate, self).add(*args)
45 38
46 39 @_editfunc
47 40 def normallookup(self, *args):
48 41 return super(narrowdirstate, self).normallookup(*args)
49 42
50 43 @_editfunc
51 44 def copy(self, *args):
52 45 return super(narrowdirstate, self).copy(*args)
53 46
54 47 @_editfunc
55 48 def remove(self, *args):
56 49 return super(narrowdirstate, self).remove(*args)
57 50
58 51 @_editfunc
59 52 def merge(self, *args):
60 53 return super(narrowdirstate, self).merge(*args)
61 54
62 55 def rebuild(self, parent, allfiles, changedfiles=None):
63 56 if changedfiles is None:
64 57 # Rebuilding entire dirstate, let's filter allfiles to match the
65 58 # narrowspec.
66 59 allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
67 60 super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles)
68 61
69 62 dirstate.__class__ = narrowdirstate
70 63 return dirstate
@@ -1,3312 +1,3313 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 short,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 changelog,
25 25 copies,
26 26 crecord as crecordmod,
27 27 dirstateguard,
28 28 encoding,
29 29 error,
30 30 formatter,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge as mergemod,
34 34 mergeutil,
35 35 obsolete,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 revlog,
41 41 rewriteutil,
42 42 scmutil,
43 43 smartset,
44 44 subrepoutil,
45 45 templatekw,
46 46 templater,
47 47 util,
48 48 vfs as vfsmod,
49 49 )
50 50
51 51 from .utils import (
52 52 dateutil,
53 53 stringutil,
54 54 )
55 55
stringio = util.stringio

# templates of common command options

# --dry-run: preview actions without performing them
dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

# --confirm: prompt before applying actions
confirmopts = [
    ('', 'confirm', None,
     _('ask before applying actions')),
]

# options shared by commands that talk to a remote repository
remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

# -I/-X pattern options for commands that walk files
walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

# commit message options (-m/-l)
commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

# commit metadata options (-d/-u)
commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# -T/--template for formatter-enabled commands
formatteropts = [
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

# template/style options for log-like commands
templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

# options shared by log-like commands
logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

# basic diff output options
diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

# whitespace-handling diff options
diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

# extended diff options (embeds the whitespace options above)
diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

# -t/--tool for merge-capable commands
mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

# -s/--similarity for rename guessing
similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

# -S/--subrepos recursion flag
subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

# options for debug commands that open a revlog directly
debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
174 174
def ishunk(x):
    """Return True when x is a record or crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
178 178
def newandmodified(chunks, originalchunks):
    """Return the filenames of hunks that add a new file and are not
    present in originalchunks."""
    result = set()
    for chunk in chunks:
        if (ishunk(chunk) and chunk.header.isnewfile()
                and chunk not in originalchunks):
            result.add(chunk.header.filename())
    return result
186 186
def parsealiases(cmd):
    """Split a command table key such as "^log|history" into its aliases."""
    names = cmd.lstrip("^")
    return names.split("|")
189 189
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the unwrapped write method so the caller can restore it later.
    """
    oldwrite = ui.write

    def wrapped(*args, **kwargs):
        label = kwargs.pop(r'label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    setattr(ui, 'write', wrapped)
    return oldwrite
202 202
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user select hunks, via curses when enabled, else text mode."""
    if usecurses:
        try:
            if testfile:
                recordfn = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector)
            else:
                recordfn = crecordmod.chunkselector
            return crecordmod.filterpatch(ui, originalhunks, recordfn,
                                          operation)
        except crecordmod.fallbackerror as e:
            # curses failed to start; warn and drop through to text mode
            ui.warn('%s\n' % e.message)
            ui.warn(_('falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, operation)
219 219
def recordfilter(ui, originalhunks, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used for to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    # colorize diff output while the chooser runs, restoring ui.write after
    oldwrite = setupwrapcolorwrite(ui)
    try:
        selection = filterchunks(ui, originalhunks, usecurses, testfile,
                                 operation)
    finally:
        ui.write = oldwrite
    newchunks, newopts = selection
    return newchunks, newopts
236 236
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively filter working-copy changes and commit the selection.

    commitfunc performs the actual commit; cmdsuggest names the command to
    suggest when the ui is non-interactive; backupall forces backing up every
    changed file; filterfn (e.g. recordfilter) selects which hunks to apply.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            # collect explicitly-named directories and abort on bad patterns
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # build a git-style diff of the working copy to present to filterfn
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have no files(); only hunks contribute
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
               newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # serialize only the hunks whose files we backed up
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this is racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; leftover backups are not fatal
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
418 418
class dirnode(object):
    """
    A directory in the user working copy, tracked for status tersing.

    path is the path to the directory, without a trailing '/'.

    statuses holds the status characters of every file at or below this
    directory (subdirectories included).

    files lists (name, status) pairs for direct children only.

    subdirs maps a child directory name to its own dirnode object.
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record filename as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file somewhere under this directory.

        When filename contains a path separator we recurse into (creating
        if necessary) the matching subdirectory; otherwise the file is a
        direct child and is recorded here.
        """
        if '/' in filename:
            subdir, remainder = filename.split('/', 1)

            # create the dirnode for subdir on first sight
            if subdir not in self.subdirs:
                self.subdirs[subdir] = dirnode(pathutil.join(self.path,
                                                             subdir))

            self.subdirs[subdir].addfile(remainder, status)
        else:
            self._addfileindir(filename, status)

        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with the
        `--terse` flag.

        If every file below this directory shares one status and the user
        asked to terse that status, a single (status, dirpath + '/') pair is
        yielded.  Otherwise direct files are yielded individually, then each
        subdirectory is walked recursively.
        """
        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # terse only when that status abbreviation was requested
            if onlyst in terseargs:
                yield onlyst, self.path + '/'
                return

        # emit the direct files of this directory
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse into the subdirectories
        for child in self.subdirs.values():
            for st, fpath in child.tersewalk(terseargs):
                yield st, fpath
518 518
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    Builds a tree of dirnode objects holding, per node, the information
    needed to decide whether that directory can be tersed.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # checking the argument validity
    for ch in pycompat.bytestr(terseargs):
        if ch not in allst:
            raise error.Abort(_("'%s' not recognized") % ch)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        tersedict[statuschar] = []
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)

    # the root dir is never tersed, so add its direct files as-is
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory, tersing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    # emit one sorted list per status, in canonical order
    return [sorted(tersedict[st]) for st in allst]
567 567
568 568 def _commentlines(raw):
569 569 '''Surround lineswith a comment char and a new line'''
570 570 lines = raw.splitlines()
571 571 commentedlines = ['# %s' % line for line in lines]
572 572 return '\n'.join(commentedlines) + '\n'
573 573
def _conflictsmsg(repo):
    """Return commented text about unresolved merge conflicts, or None
    when no merge is active."""
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if not unresolvedlist:
        return _commentlines(_('No unresolved merge conflicts.'))

    mergeliststr = '\n'.join(
        [' %s' % util.pathto(repo.root, encoding.getcwd(), path)
         for path in unresolvedlist])
    msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    return _commentlines(msg)
594 594
def _helpmessage(continuecmd, abortcmd):
    """Return a commented two-line continue/abort hint."""
    msg = _('To continue: %s\n'
            'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(msg)

def _rebasemsg():
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')

def _histeditmsg():
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')

def _unshelvemsg():
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')

def _graftmsg():
    # an interrupted graft is either continued or aborted
    return _helpmessage('hg graft --continue', 'hg graft --abort')

def _mergemsg():
    # a merge in progress is completed by committing or undone by aborting
    return _helpmessage('hg commit', 'hg merge --abort')

def _bisectmsg():
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)
622 622
def fileexistspredicate(filename):
    """Return a predicate checking whether repo.vfs contains filename."""
    def _exists(repo):
        return repo.vfs.exists(filename)
    return _exists
625 625
626 626 def _mergepredicate(repo):
627 627 return len(repo[None].parents()) > 1
628 628
# Unfinished-operation detection table, scanned in order by _getrepostate().
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('shelvedstate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
642 642
def _getrepostate(repo):
    """Return (state, predicate, msgfn) for the first detected unfinished
    operation, or None when the repo is in no such state."""
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, statedetectionpredicate, msgfn in STATES:
        if state not in skip and statedetectionpredicate(repo):
            return (state, statedetectionpredicate, msgfn)
651 651
def morestatus(repo, fm):
    """Write commented hints about any unfinished state to formatter fm."""
    statetuple = _getrepostate(repo)
    if not statetuple:
        return
    label = 'status.morestatus'
    state, statedetectionpredicate, helpfulmsg = statetuple
    statemsg = _('The repository is in an unfinished *%s* state.') % state
    fm.plain('%s\n' % _commentlines(statemsg), label=label)
    conmsg = _conflictsmsg(repo)
    if conmsg:
        fm.plain('%s\n' % conmsg, label=label)
    if helpfulmsg:
        fm.plain('%s\n' % helpfulmsg(), label=label)
665 665
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # fall back to the first alias that cmd is a prefix of
            prefixed = [a for a in aliases if a.startswith(cmd)]
            if prefixed:
                found = prefixed[0]
        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    # surface debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
703 703
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
719 719
def changebranch(ui, repo, revs, label):
    """ Change the branch name of given revs to label

    Rewrites each revision with the new branch name, creating obsolescence
    markers from the old nodes to the new ones, and moves the working copy
    if it sat on a rewritten revision.
    """

    with repo.wlock(), repo.lock(), repo.transaction('branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort("empty revision set")
        roots = repo.revs('roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(_("cannot change branch of non-linear revisions"))
        rewriteutil.precheck(repo, revs, 'change branch of')

        root = repo[roots.first()]
        # refuse to collide with an existing branch, unless we are merely
        # extending it (parent of the root is already on that branch)
        if not root.p1().branch() == label and label in repo.branchmap():
            raise error.Abort(_("a branch of the same name already exists"))

        if repo.revs('merge() and %ld', revs):
            raise error.Abort(_("cannot change branch of a merge commit"))
        if repo.revs('obsolete() and %ld', revs):
            raise error.Abort(_("cannot change branch of a obsolete changeset"))

        # make sure only topological heads
        if repo.revs('heads(%ld) - head()', revs):
            raise error.Abort(_("cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context
        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug("changing branch of '%s' from '%s' to '%s'\n"
                     % (hex(ctx.node()), oldbranch, label))
            extra = ctx.extra()
            extra['branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(repo, (p1, p2),
                                ctx.description(),
                                ctx.files(),
                                filectxfn,
                                user=ctx.user(),
                                date=ctx.date(),
                                extra=extra,
                                branch=label)

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug('new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg
                hg.update(repo, newid[0], quietempty=True)

        ui.status(_("changed branch on %d changesets\n") % len(replacements))
806 806
def findrepo(p):
    """Walk upward from p looking for a directory containing '.hg'.

    Returns the repository root, or None when no '.hg' is found before the
    filesystem root.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        prev, p = p, os.path.dirname(p)
        if p == prev:
            # dirname() reached a fixed point: filesystem root, no repo
            return None

    return p
814 814
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    modified, added, removed, deleted = repo.status()[:4]
    if any((modified, added, removed, deleted)):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    # also require every subrepository to be clean
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
832 832
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        try:
            if isstdiofilename(logfile):
                # '-' means read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message
851 851
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        merging = ctxorbool
    else:
        # a changectx with two parents is a merge
        merging = len(ctxorbool.parents()) > 1
    suffix = ".merge" if merging else ".normal"
    return baseformname + suffix
868 868
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcingeditor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcingeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
899 899
def _escapecommandtemplate(tmpl):
    # Escape backslashes in the literal (string) parts of a template so the
    # template engine does not treat them as escape sequences; template
    # syntax parts are passed through untouched.
    pieces = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        piece = tmpl[start:end]
        if typ == b'string':
            piece = stringutil.escapestr(piece)
        pieces.append(piece)
    return b''.join(pieces)
908 908
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    escaped = _escapecommandtemplate(tmpl)
    return formatter.maketemplater(ui, escaped).renderdefault(props)
926 926
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    t = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords,
        resources=formatter.templateresources(repo.ui, repo))
    # 'ctx' first so callers may deliberately override it via props
    mapping = {'ctx': ctx}
    if props:
        mapping.update(props)
    return t.renderdefault(mapping)
941 941
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # map of %-escape -> equivalent template fragment; extended below with
    # escapes that are only valid when the corresponding value is available
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
        }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # zero-pad the sequence number to the width of the total count
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    # only literal (string) parts of the pattern get %-expansion; template
    # syntax parts are copied through verbatim
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more escapes: emit the remaining literal text
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                raise error.Abort(_("incomplete format spec in output "
                                    "filename"))
            c = pat[n + 1:n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(_("invalid format spec '%%%s' in output "
                                    "filename") % c)
    return ''.join(newname)
1009 1009
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format output filename pattern against ctx."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, _buildfntemplate(pat, **props),
                          pycompat.byteskwargs(props))
1018 1018
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        # empty/None means "no file given": use the standard streams
        return True
    return pat == '-'
1022 1022
1023 1023 class _unclosablefile(object):
1024 1024 def __init__(self, fp):
1025 1025 self._fp = fp
1026 1026
1027 1027 def close(self):
1028 1028 pass
1029 1029
1030 1030 def __iter__(self):
1031 1031 return iter(self._fp)
1032 1032
1033 1033 def __getattr__(self, attr):
1034 1034 return getattr(self._fp, attr)
1035 1035
1036 1036 def __enter__(self):
1037 1037 return self
1038 1038
1039 1039 def __exit__(self, exc_type, exc_value, exc_tb):
1040 1040 pass
1041 1041
def makefileobj(ctx, pat, mode='wb', **props):
    """Open the output/input destination named by old-style pattern pat.

    A stdio pattern ('' or '-') yields the ui's standard stream wrapped so
    it cannot be closed; anything else is expanded via makefilename() and
    opened with the given mode.
    """
    if isstdiofilename(pat):
        ui = ctx.repo().ui
        if mode in ('r', 'rb'):
            stream = ui.fin
        else:
            stream = ui.fout
        return _unclosablefile(stream)
    return open(makefilename(ctx, pat, **props), mode)
1054 1054
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    # validate mutually-exclusive option combinations first; 'msg' collects
    # the first applicable error so only one Abort is raised
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            if not dir.endswith('/'):
                dir = dir + '/'
            dirlog = repo.manifestlog.getstorage(dir)
            # only use the dir manifest log if it has any revisions
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            # an empty filelog means the file is not tracked; fall through
            # to the raw-revlog path below
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, '_revlog'):
            r = r._revlog
        elif r is not None:
            raise error.Abort(_('%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_('cannot give path to non-revlog'))

        # last resort: treat file_ as a path to a .i/.d revlog pair on disk
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
1115 1115
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    Thin wrapper around ``openstorage()`` that forces the return value to
    be an actual revlog instance rather than the higher-level storage
    object backed by it. Intended only for code that needs to poke at
    low-level revlog internals, e.g. debug commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1127 1127
def copy(ui, repo, pats, opts, rename=False):
    """Copy or rename (when rename=True) working-directory files.

    'pats' is the list of source patterns with the destination appended as
    the last element. Returns True when any per-file operation failed.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used to detect colliding targets
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples, warning
        # about and skipping files in a "bad" dirstate ('?' unknown, and
        # 'r' removed unless --after)
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform (or record, with --after) a single copy/rename;
        # returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return True # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            # source and target may be the same file on case-insensitive
            # filesystems; a rename there is a case-only rename
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return True # report a failure
                exists = False
                samefile = True

        # precedence: (not after and exists) or (after and state in 'mn')
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _("('hg rename %s' to replace the file by "
                                 'recording a rename)\n') % flags
                    else:
                        hint = _("('hg copy %s' to replace the file by "
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _("('hg rename --after' to record the rename)\n")
                    else:
                        hint = _("('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return True # report a failure
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temp name so the OS
                    # actually changes the on-disk case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    # NOTE(review): 'hint' computed below does not appear to
                    # be written anywhere on this path — confirm intent
                    if rename:
                        hint = _("('hg rename --after' to record the rename)\n")
                    else:
                        hint = _("('hg copy --after' to record the copy)\n")
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped source paths already exist
                    # under dest; used to pick the more plausible strip
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1377 1377
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' functions are run before the commit is made and are provided
# the following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided
# the following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1398 1398
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (msg, node, rejects) tuple; node is None when nothing was
    committed, and rejects is True when --partial left reject files.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get('filename')
    message = patchdata.get('message')
    user = opts.get('user') or patchdata.get('user')
    date = opts.get('date') or patchdata.get('date')
    branch = patchdata.get('branch')
    nodeid = patchdata.get('nodeid')
    p1 = patchdata.get('p1')
    p2 = patchdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)

    if not tmpname:
        # nothing extracted from the patch: nothing to do
        return None, None, False

    rejects = False

    # commit message priority: -m/-l option > patch body > editor
    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug('message:\n%s\n' % (message or ''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get('exact'):
        if not nodeid or not p1:
            raise error.Abort(_('not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(_("warning: import the patch as a normal revision\n"
                      "(use --exact to import the patch as a merge)\n"))
    else:
        p1, p2 = parents

    n = None
    if update:
        # normal mode: apply the patch to the working directory and commit
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get('exact') or importbranch:
            repo.dirstate.setbranch(branch or 'default')

        partial = opts.get('partial', False)
        files = set()
        try:
            patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                        files=files, eolmode=None, similarity=sim / 100.0)
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get('exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], 'import.normal')
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=editform,
                                         **pycompat.strkwargs(opts))
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                # a partial import may legitimately change nothing
                overrides[('ui', 'allowemptycommit')] = True
            with repo.ui.configoverride(overrides, 'import'):
                n = repo.commit(message, user,
                                date, match=m,
                                editor=editor, extra=extra)
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass mode: commit directly from a patch filestore without
        # touching the working directory
        if opts.get('exact') or importbranch:
            branch = branch or 'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                files, eolmode=None)
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform='import.bypass')
            memctx = context.memctx(repo, (p1.node(), p2.node()),
                                    message,
                                    files=files,
                                    filectxfn=store,
                                    user=user,
                                    date=date,
                                    branch=branch,
                                    editor=editor)
            n = memctx.commit()
        finally:
            store.close()
    if opts.get('exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_("warning: can't check exact import with --no-commit\n"))
    elif opts.get('exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_('patch is damaged or loses information'))
    msg = _('applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _('created %s') % short(n)
    return msg, n, rejects
1568 1568
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1576 1576
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one changeset as an "HG changeset patch" through formatter fm.

    The diff is taken against the first parent (or the second one when
    switch_parent is set); 'seqno' is the 1-based position in the series,
    passed to extension-provided header hooks.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against p2 instead of p1
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain('# HG changeset patch\n')
    fm.write('user', '# User %s\n', ctx.user())
    fm.plain('# Date %d %d\n' % ctx.date())
    fm.write('date', '# %s\n', fm.formatdate(ctx.date()))
    # the Branch header is omitted for the default branch
    fm.condwrite(branch and branch != 'default',
                 'branch', '# Branch %s\n', branch)
    fm.write('node', '# Node ID %s\n', hex(node))
    fm.plain('# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain('# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain('# %s\n' % header)

    fm.write('desc', '%s\n', ctx.description().rstrip())
    fm.plain('\n')

    if fm.isplain():
        # plain output: stream labeled diff chunks for color support
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
1619 1619
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    for seqno, rev in enumerate(revs, 1):
        if not dest.startswith('<'):
            # '<...>' placeholders (e.g. '<unnamed>') are not real paths
            repo.ui.note("%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, repo[rev], fm, match, switch_parent, seqno,
                      diffopts)
1628 1628
def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
                      match):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)

    # group revisions by their expanded output filename, keeping insertion
    # order so files are written in series order
    filemap = util.sortdict()  # filename: [(seqno, rev), ...]
    for seqno, rev in enumerate(revs, 1):
        dest = makefilename(repo[rev], fntemplate,
                            total=total, seqno=seqno, revwidth=revwidth)
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note("%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                _exportsingle(repo, repo[rev], fm, match, switch_parent,
                              seqno, diffopts)
1650 1650
def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    '''
    # make file contents available locally before generating the diffs
    scmutil.prefetchfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
                          match)
    else:
        _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts,
                    match)
1682 1682
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Write patches for ``revs`` to the already-open file object ``fp``."""
    scmutil.prefetchfiles(repo, revs, match)

    try:
        dest = fp.name
    except AttributeError:
        dest = '<unnamed>'
    with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
        _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
1690 1690
def showmarker(fm, marker, index=None):
    """Render one obsolescence marker through formatter ``fm``.

    Intended for debug commands; ``index`` is an optional ordinal that is
    emitted first when given.
    """
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist(map(hex, successors), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parentnodes), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from the metadata dict
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    printable = pycompat.rapply(pycompat.maybebytestr, metadata)
    fm.write('metadata', '{%s}',
             fm.formatdict(printable, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1712 1712
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    matchdate = dateutil.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}  # rev -> date tuple, for revisions whose date fits the spec

    def prep(ctx, fns):
        when = ctx.date()
        if matchdate(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, dateutil.datestr(matched[rev])))
        return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1733 1733
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double until reaching/passing ``sizelimit``.

    With the defaults this generates the infinite sequence
    8, 16, 32, ..., 512, 512, ...; used to walk revisions in progressively
    larger batches.  The size is only doubled while strictly below the
    limit, so a starting size just under the limit may overshoot it once.
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1739 1739
def _walkrevs(repo, opts):
    """Resolve the revisions a log-like walk should visit, newest first.

    The default --rev value depends on --follow, but --follow behavior
    depends on revisions resolved from --rev, hence this helper.
    """
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if follow and repo.dirstate.p1() == nullid:
        # following from an unborn working-directory parent: nothing to walk
        return smartset.baseset()
    if follow:
        return repo.revs('reverse(:.)')
    revs = smartset.spanset(repo)
    revs.reverse()
    return revs
1754 1754
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone."""
1757 1757
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    repo: repository to walk
    match: matcher whose explicit files() are walked (no patterns here)
    follow: if true, walk through renames/copies as well
    revs: candidate revisions; only linkrevs within [min(revs), max(revs)]
        are considered
    fncache: dict filled as a side effect, mapping each wanted rev to the
        list of matched files it touches
    '''
    wanted = set()
    # (filename, filenode) pairs discovered through renames when following
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode) for the explicit files first, then for
        # any copy/rename sources appended to ``copies`` during the walk
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1854 1854
class _followfilter(object):
    """Stateful filter selecting ancestors/descendants of an anchor rev.

    The first revision passed to match() becomes the anchor and always
    matches.  Afterwards, revisions greater than the anchor match if they
    descend from it; revisions smaller than it match if they are its
    ancestors.  match() must be fed revisions moving monotonically away
    from the anchor, since the root set is grown incrementally.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            parentrevs = self.repo.changelog.parentrevs(rev)
            if self.onlyfirst:
                return parentrevs[0:1]
            return [p for p in parentrevs if p != nullrev]

        if self.startrev == nullrev:
            # first call anchors the filter
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: rev matches when any of its parents is already
            # known to descend from the anchor
            if not self.roots:
                self.roots.add(self.startrev)
            if any(p in self.roots for p in realparents(rev)):
                self.roots.add(rev)
                return True
        else:
            # backwards: rev matches when it is a known ancestor; its own
            # parents then become ancestor candidates
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1892 1892
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.

    Consulted opts keys: 'all_files', 'follow', 'follow_first',
    'removed', 'prune', and (via _walkrevs) 'rev'.'''

    allfiles = opts.get('all_files')
    follow = opts.get('follow') or opts.get('follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # slow path: filename patterns, or --removed with explicit files,
    # force reading the changelog instead of using filelogs
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always() or allfiles:
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in pycompat.xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on ``wanted`` supporting subtraction
                # of a list (smartset semantics); a plain set() here would
                # raise TypeError -- confirm --prune only reaches this path
                # with a smartset-backed ``wanted``
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare() sees the window's revisions in forward order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    # no cached file list: compute matched files lazily
                    def fns_generator():
                        if allfiles:
                            fiter = iter(ctx)
                        else:
                            fiter = ctx.files()
                        for f in fiter:
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2034 2034
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matched, untracked files for addition to the dirstate.

    prefix: path prefix joined to file names in subrepo-related messages
    explicitonly: only add files explicitly named by the matcher
    opts: may contain r'subrepos' (recurse into subrepos with
        explicitonly=False) and r'dry_run' (skip the dirstate update)

    Returns the list of file names that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record files the matcher reports as bad, then delegate to the
    # matcher's own bad-file handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    # restrict the walk to the narrowspec, but keep explicitly-named files
    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        # exact matches are always added; other files only when not
        # explicitonly, untracked, and present on disk (note that 'and'
        # binds tighter than 'or' in this condition)
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for case collisions with already-tracked files
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f),
                          label='addremove.added')

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2078 2079
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` (and, recursively, its subrepos) in ``webconf``."""
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    # recurse into every subrepo that ever appeared in .hgsub
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2087 2088
def forget(ui, repo, match, prefix, explicitonly, dryrun, interactive):
    """Stop tracking the matched files without deleting them.

    prefix: path prefix joined to file names in subrepo-related messages
    explicitonly: only forget files explicitly named by the matcher
    dryrun: compute and report, but do not modify the dirstate
    interactive: prompt per file ([Ynsa?])

    Returns a pair (bad, forgot): files that could not be forgotten, and
    files that were (or, with dryrun, would have been) forgotten.
    """
    if dryrun and interactive:
        raise error.Abort(_("cannot specify both --dry-run and --interactive"))
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # record files the matcher reports as bad, then delegate to the
    # matcher's own bad-file handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix, dryrun=dryrun,
                                           interactive=interactive)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked and were
        # not handled by a subrepo above
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    if interactive:
        responses = _('[Ynsa?]'
                      '$$ &Yes, forget this file'
                      '$$ &No, skip this file'
                      '$$ &Skip remaining files'
                      '$$ Include &all remaining files'
                      '$$ &? (display help)')
        # iterate over a copy, since the answers mutate ``forget`` in place
        for filename in forget[:]:
            r = ui.promptchoice(_('forget %s %s') % (filename, responses))
            if r == 4: # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(_('forget %s %s') % (filename,
                                                             responses))
            if r == 0: # yes
                continue
            elif r == 1: # no
                forget.remove(filename)
            elif r == 2: # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3: # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(_('removing %s\n') % match.rel(f),
                      label='addremove.removed')

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2166 2167
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of ``ctx`` matching ``m`` through formatter ``fm``.

    Returns 0 when at least one file was listed (here or in a subrepo),
    1 otherwise.
    """
    ret = 1

    # looking up a filectx is comparatively expensive; only do it when the
    # output actually needs size/flags
    needsfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
    for path in ctx.matches(m):
        fm.startitem()
        fm.context(ctx=ctx)
        if needsfctx:
            fctx = ctx[path]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(path=path)
        fm.plain(fmt % m.rel(path))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        recurse = subrepos or m.exact(subpath)
        if not (recurse or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                ret = 0
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % m.abs(subpath))

    return ret
2194 2195
def remove(ui, repo, m, prefix, after, force, subrepos, dryrun, warnings=None):
    """Remove the matched files from the repository (and the filesystem).

    after: record deletions of already-missing files instead of unlinking
    force: also remove modified/added files
    subrepos: recurse into subrepositories
    dryrun: report what would happen without touching dirstate or disk
    warnings: optional list collecting warning messages; when None, a new
        list is used and its contents are printed before returning

    Returns 0 on success, 1 when any warning was issued.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    # status tuple indices: 0=modified, 1=added, 3=deleted, 6=clean
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # caller collects warnings itself (e.g. a recursive subrepo call)
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(_('searching'), total=len(subs),
                               unit=_('subrepos'))
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   dryrun, warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(_('deleting'), total=len(files),
                               unit=_('files'))
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record files that are already gone from disk
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(_('skipping'), total=len(remaining),
                                   unit=_('files'))
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % m.rel(f))
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(_('skipping'),
                                   total=(len(modified) + len(added)),
                                   unit=_('files'))
        for f in modified:
            progress.increment()
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(_('deleting'), total=len(list),
                               unit=_('files'))
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(_('removing %s\n') % m.rel(f),
                      label='addremove.removed')
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue # we never unlink added files on remove
                    rmdir = repo.ui.configbool('experimental',
                                               'removeemptydirs')
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2312 2313
2313 2314 def _updatecatformatter(fm, ctx, matcher, path, decode):
2314 2315 """Hook for adding data to the formatter used by ``hg cat``.
2315 2316
2316 2317 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2317 2318 this method first."""
2318 2319 data = ctx[path].data()
2319 2320 if decode:
2320 2321 data = ctx.repo().wwritedata(path, data)
2321 2322 fm.startitem()
2322 2323 fm.context(ctx=ctx)
2323 2324 fm.write('data', '%s', data)
2324 2325 fm.data(path=path)
2325 2326
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write the contents of matched files in ``ctx`` to ``basefm``, or to
    per-file destinations expanded from ``fntemplate`` when it is non-empty.

    opts may contain 'decode' (apply working-dir write filters to the data).

    Returns 0 when at least one file was written (here or in a subrepo),
    1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(ctx, fntemplate,
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            # fall through to the general manifest walk below
            pass

    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            # subrepo cat() returns 0 on success, mirroring this function
            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path),
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2377 2378
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = dateutil.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # --addremove needs a dirstate guard, but this function can also be
    # called from commands that do not support addremove at all
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    else:
        dsguard = None
    with dsguard or util.nullcontextmanager():
        if dsguard:
            if scmutil.addremove(repo, matcher, "", opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
2398 2399
def samefile(f, ctx1, ctx2):
    """Return True when file ``f`` is identical in ``ctx1`` and ``ctx2``.

    "Identical" means present in both with equal content and flags, or
    absent from both manifests.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # absent from both counts as "same"
        return not in2
    if not in2:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2410 2411
2411 2412 def amend(ui, repo, old, extra, pats, opts):
2412 2413 # avoid cycle context -> subrepo -> cmdutil
2413 2414 from . import context
2414 2415
2415 2416 # amend will reuse the existing user if not specified, but the obsolete
2416 2417 # marker creation requires that the current user's name is specified.
2417 2418 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2418 2419 ui.username() # raise exception if username not set
2419 2420
2420 2421 ui.note(_('amending changeset %s\n') % old)
2421 2422 base = old.p1()
2422 2423
2423 2424 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2424 2425 # Participating changesets:
2425 2426 #
2426 2427 # wctx o - workingctx that contains changes from working copy
2427 2428 # | to go into amending commit
2428 2429 # |
2429 2430 # old o - changeset to amend
2430 2431 # |
2431 2432 # base o - first parent of the changeset to amend
2432 2433 wctx = repo[None]
2433 2434
2434 2435 # Copy to avoid mutating input
2435 2436 extra = extra.copy()
2436 2437 # Update extra dict from amended commit (e.g. to preserve graft
2437 2438 # source)
2438 2439 extra.update(old.extra())
2439 2440
2440 2441 # Also update it from the from the wctx
2441 2442 extra.update(wctx.extra())
2442 2443
2443 2444 user = opts.get('user') or old.user()
2444 2445 date = opts.get('date') or old.date()
2445 2446
2446 2447 # Parse the date to allow comparison between date and old.date()
2447 2448 date = dateutil.parsedate(date)
2448 2449
2449 2450 if len(old.parents()) > 1:
2450 2451 # ctx.files() isn't reliable for merges, so fall back to the
2451 2452 # slower repo.status() method
2452 2453 files = set([fn for st in base.status(old)[:3]
2453 2454 for fn in st])
2454 2455 else:
2455 2456 files = set(old.files())
2456 2457
2457 2458 # add/remove the files to the working copy if the "addremove" option
2458 2459 # was specified.
2459 2460 matcher = scmutil.match(wctx, pats, opts)
2460 2461 if (opts.get('addremove')
2461 2462 and scmutil.addremove(repo, matcher, "", opts)):
2462 2463 raise error.Abort(
2463 2464 _("failed to mark all new/missing files as added/removed"))
2464 2465
2465 2466 # Check subrepos. This depends on in-place wctx._status update in
2466 2467 # subrepo.precommit(). To minimize the risk of this hack, we do
2467 2468 # nothing if .hgsub does not exist.
2468 2469 if '.hgsub' in wctx or '.hgsub' in old:
2469 2470 subs, commitsubs, newsubstate = subrepoutil.precommit(
2470 2471 ui, wctx, wctx._status, matcher)
2471 2472 # amend should abort if commitsubrepos is enabled
2472 2473 assert not commitsubs
2473 2474 if subs:
2474 2475 subrepoutil.writestate(repo, newsubstate)
2475 2476
2476 2477 ms = mergemod.mergestate.read(repo)
2477 2478 mergeutil.checkunresolved(ms)
2478 2479
2479 2480 filestoamend = set(f for f in wctx.files() if matcher(f))
2480 2481
2481 2482 changes = (len(filestoamend) > 0)
2482 2483 if changes:
2483 2484 # Recompute copies (avoid recording a -> b -> a)
2484 2485 copied = copies.pathcopies(base, wctx, matcher)
2485 2486 if old.p2:
2486 2487 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2487 2488
2488 2489 # Prune files which were reverted by the updates: if old
2489 2490 # introduced file X and the file was renamed in the working
2490 2491 # copy, then those two files are the same and
2491 2492 # we can discard X from our list of files. Likewise if X
2492 2493 # was removed, it's no longer relevant. If X is missing (aka
2493 2494 # deleted), old X must be preserved.
2494 2495 files.update(filestoamend)
2495 2496 files = [f for f in files if (not samefile(f, wctx, base)
2496 2497 or f in wctx.deleted())]
2497 2498
2498 2499 def filectxfn(repo, ctx_, path):
2499 2500 try:
2500 2501 # If the file being considered is not amongst the files
2501 2502 # to be amended, we should return the file context from the
2502 2503 # old changeset. This avoids issues when only some files in
2503 2504 # the working copy are being amended but there are also
2504 2505 # changes to other files from the old changeset.
2505 2506 if path not in filestoamend:
2506 2507 return old.filectx(path)
2507 2508
2508 2509 # Return None for removed files.
2509 2510 if path in wctx.removed():
2510 2511 return None
2511 2512
2512 2513 fctx = wctx[path]
2513 2514 flags = fctx.flags()
2514 2515 mctx = context.memfilectx(repo, ctx_,
2515 2516 fctx.path(), fctx.data(),
2516 2517 islink='l' in flags,
2517 2518 isexec='x' in flags,
2518 2519 copied=copied.get(path))
2519 2520 return mctx
2520 2521 except KeyError:
2521 2522 return None
2522 2523 else:
2523 2524 ui.note(_('copying changeset %s to %s\n') % (old, base))
2524 2525
2525 2526 # Use version of files as in the old cset
2526 2527 def filectxfn(repo, ctx_, path):
2527 2528 try:
2528 2529 return old.filectx(path)
2529 2530 except KeyError:
2530 2531 return None
2531 2532
2532 2533 # See if we got a message from -m or -l, if not, open the editor with
2533 2534 # the message of the changeset to amend.
2534 2535 message = logmessage(ui, opts)
2535 2536
2536 2537 editform = mergeeditform(old, 'commit.amend')
2537 2538 editor = getcommiteditor(editform=editform,
2538 2539 **pycompat.strkwargs(opts))
2539 2540
2540 2541 if not message:
2541 2542 editor = getcommiteditor(edit=True, editform=editform)
2542 2543 message = old.description()
2543 2544
2544 2545 pureextra = extra.copy()
2545 2546 extra['amend_source'] = old.hex()
2546 2547
2547 2548 new = context.memctx(repo,
2548 2549 parents=[base.node(), old.p2().node()],
2549 2550 text=message,
2550 2551 files=files,
2551 2552 filectxfn=filectxfn,
2552 2553 user=user,
2553 2554 date=date,
2554 2555 extra=extra,
2555 2556 editor=editor)
2556 2557
2557 2558 newdesc = changelog.stripdesc(new.description())
2558 2559 if ((not changes)
2559 2560 and newdesc == old.description()
2560 2561 and user == old.user()
2561 2562 and date == old.date()
2562 2563 and pureextra == old.extra()):
2563 2564 # nothing changed. continuing here would create a new node
2564 2565 # anyway because of the amend_source noise.
2565 2566 #
2566 2567 # This not what we expect from amend.
2567 2568 return old.node()
2568 2569
2569 2570 commitphase = None
2570 2571 if opts.get('secret'):
2571 2572 commitphase = phases.secret
2572 2573 newid = repo.commitctx(new)
2573 2574
2574 2575 # Reroute the working copy parent to the new changeset
2575 2576 repo.setparents(newid, nullid)
2576 2577 mapping = {old.node(): (newid,)}
2577 2578 obsmetadata = None
2578 2579 if opts.get('note'):
2579 2580 obsmetadata = {'note': encoding.fromlocal(opts['note'])}
2580 2581 backup = ui.configbool('ui', 'history-editing-backup')
2581 2582 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
2582 2583 fixphase=True, targetphase=commitphase,
2583 2584 backup=backup)
2584 2585
2585 2586 # Fixing the dirstate because localrepo.commitctx does not update
2586 2587 # it. This is rather convenient because we did not need to update
2587 2588 # the dirstate for all the files in the new commit which commitctx
2588 2589 # could have done if it updated the dirstate. Now, we can
2589 2590 # selectively update the dirstate only for the amended files.
2590 2591 dirstate = repo.dirstate
2591 2592
2592 2593 # Update the state of the files which were added and
2593 2594 # and modified in the amend to "normal" in the dirstate.
2594 2595 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
2595 2596 for f in normalfiles:
2596 2597 dirstate.normal(f)
2597 2598
2598 2599 # Update the state of files which were removed in the amend
2599 2600 # to "removed" in the dirstate.
2600 2601 removedfiles = set(wctx.removed()) & filestoamend
2601 2602 for f in removedfiles:
2602 2603 dirstate.drop(f)
2603 2604
2604 2605 return newid
2605 2606
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's existing description, or prompt the user for one.

    When ctx already carries a non-empty description it is used as-is;
    otherwise the editor is launched (via commitforceeditor) and an
    unchanged templated message aborts the commit.
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2611 2612
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Open the user's editor to collect a commit message for ``ctx``.

    The initial editor content is taken from the most specific
    ``committemplate`` config section matching ``editform`` (falling back
    component by component, e.g. 'changeset.commit.amend' -> 'changeset'),
    or from buildcommittext() when no template is configured.

    ``finishdesc`` is an optional callable applied to the edited text
    before validation.  Raises error.Abort when the resulting message is
    empty, or when ``unchangedmessagedetection`` is set and the user left
    the templated text unmodified.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Try progressively less specific template names; the loop's `else`
    # clause (no break) falls back to the plain text template.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the "HG:" helper lines added by the template
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2661 2662
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the ``committemplate`` template *ref* for ctx and return it."""
    ui = repo.ui
    tmpl = logcmdutil.changesettemplater(
        ui, repo, formatter.templatespec(ref, None, None))
    overrides = ((k, templater.unquotestring(v))
                 for k, v in ui.configitems('committemplate'))
    tmpl.t.cache.update(overrides)

    if not extramsg:
        extramsg = ''  # templates expect a string, never None/False

    ui.pushbuffer()
    tmpl.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2675 2676
def hgprefix(msg):
    """Prefix every non-empty line of *msg* with "HG: ", dropping blanks."""
    prefixed = ("HG: %s" % line for line in msg.split("\n") if line)
    return "\n".join(prefixed)
2678 2679
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the plain-text editor template for committing ctx.

    Lists user, branch/bookmark/merge state, subrepos and the
    added/changed/removed files, each on an "HG:"-prefixed line.
    """
    lines = []
    add = lines.append
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        add(ctx.description())
    add("")
    add("")  # Empty line between message and comments.
    add(hgprefix(_("Enter commit message."
                   " Lines beginning with 'HG:' are removed.")))
    add(hgprefix(extramsg))
    add("HG: --")
    add(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_("branch merge")))
    if ctx.branch():
        add(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        add(hgprefix(_("subrepo %s") % s))
    for f in added:
        add(hgprefix(_("added %s") % f))
    for f in modified:
        add(hgprefix(_("changed %s") % f))
    for f in removed:
        add(hgprefix(_("removed %s") % f))
    if not added and not modified and not removed:
        add(hgprefix(_("no files changed")))
    add("")

    return "\n".join(lines)
2706 2707
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print status messages after committing ``node`` on ``branch``.

    Warns when the commit created a new head, notes reopened closed
    branch heads, and echoes the committed changeset id in
    verbose/debug mode.  ``bheads`` are the branch heads as they were
    before the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r.rev())

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
2754 2755
def postcommitstatus(repo, pats, opts):
    """Return working-directory status for files matched by pats/opts."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2757 2758
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert files matched by ``pats``/``opts`` to their state in ``ctx``.

    ``parents`` is the (parent, p2) pair of the working directory.  The
    function classifies every matched file by comparing the working
    copy, its dirstate parent and the target revision, picks a per-file
    action through a dispatch table, optionally backs files up, and
    delegates the actual file operations to _performrevert().
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already covered by `names`,
                # subrepos, or directories that contain matched files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if opts.get('dry_run'):
                        if ui.verbose or not exact:
                            ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles
            prefetch(repo, [ctx.rev()],
                     matchfiles(repo,
                                [f for sublist in oplist for f in sublist]))
            _performrevert(repo, parents, ctx, names, actions, interactive,
                           tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats,
                                         **pycompat.strkwargs(opts))
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3047 3048
def _performrevert(repo, parents, ctx, names, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    ``names`` maps abs path -> (rel path, exact match?); ``actions`` maps
    action name -> (file list, status message).  ``tobackup`` is the set of
    files to copy to a .orig backup before interactively applying hunks.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declined to touch in interactive mode
    excluded_files = []

    def checkout(f):
        # write f's content from the target revision into the working copy
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
            rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            # best effort: the file may already be gone
            pass
        repo.dirstate.remove(f)

    def prntstatusmsg(action, f):
        rel, exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % rel)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                prntstatusmsg('forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg('forget', f)
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                prntstatusmsg('remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg('remove', f)
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        prntstatusmsg('drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions['revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'apply'
            reversehunks = False
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
            prntstatusmsg('revert', f)
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions['revert'][0]:
            prntstatusmsg('revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg('add', f)
            checkout(f)
            repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        prntstatusmsg('undelete', f)
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3201 3202
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg graft --stop' to stop")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3230 3231
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    Probably good to call this right before bailifchanged().  Non-clearable
    states are checked first, so things like rebase take precedence over
    update.
    '''
    for wantclearable in (False, True):
        for f, clearable, allowcommit, msg, hint in unfinishedstates:
            if clearable != wantclearable:
                continue
            if commit and allowcommit:
                continue
            if repo.vfs.exists(f):
                raise error.Abort(msg, hint=hint)
3249 3250
def clearunfinished(repo):
    '''Abort on non-clearable unfinished state, then remove clearable ones.'''
    # any non-clearable state blocks the whole operation
    for f, clearable, _allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(f):
            raise error.Abort(msg, hint=hint)
    # everything else can simply be deleted
    for f, clearable, _allowcommit, _msg, _hint in unfinishedstates:
        if clearable and repo.vfs.exists(f):
            util.unlink(repo.vfs.join(f))
3260 3261
# (.hg/ state file, command that continues the interrupted operation)
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3265 3266
def howtocontinue(repo):
    '''Return how to finish an unfinished operation, if any.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple: msg is a string (or None) and warning
    is a boolean (or None).
    '''
    contmsg = _("continue: %s")
    pending = next((msg for f, msg in afterresolvedstates
                    if repo.vfs.exists(f)), None)
    if pending is not None:
        return contmsg % pending, True
    wctx = repo[None]
    if wctx.dirty(missing=True, merge=False, branch=False):
        return contmsg % _("hg commit"), False
    return None, None
3283 3284
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve.

    Reports through repo.ui.warn when howtocontinue() flags a warning,
    and through repo.ui.note otherwise.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3298 3299
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task; when none is flagged as
    a warning, no hint is attached to the abort.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,1801 +1,1802 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        pretty = (pycompat.sysstr(stringutil.pprint(v)) for v in self)
        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                 r'unknown=%s, ignored=%s, clean=%s>') % tuple(pretty))
115 115
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context defining it, preferring ctx1. The
    # ctx2 entries matter when .hgsub has been modified (in ctx2) but not
    # yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # Subpaths that exist only in ctx2 are handled separately below.
    missing = set(s for s in ctx2.substate if s not in ctx1.substate)
    for s in missing:
        del subpaths[s]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That
    # way, status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded changesets that are secret (and not obsolete) so the
    # user learns why "nothing" happened.
    secretlist = []
    for n in (excluded or []):
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
157 157
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Handled failures return -1, except InterventionRequired (returns 1)
    and SystemExit (returns the exit code the callee passed).
    """
    try:
        try:
            return func()
        except: # re-raises
            # Record the traceback (if ui.traceback is enabled) before the
            # handlers below reduce the exception to a one-line message.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # The payload may be unicode, bytes, or anything else; normalize
        # enough to print it without crashing during error reporting.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # Not an "abort": the user is expected to resolve and continue.
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        # The last word of the message is the module that failed to import.
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # HTTPError carries a 'code' attribute.
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            # URLError wraps the real failure in 'reason'.
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe (e.g. pager quit): exit quietly.
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.error(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.error(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1
274 274
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not acceptable as a new bookmark/branch/tag name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(forbidden))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
291 291
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                          % pycompat.bytestr(f))
297 297
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
309 309
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    setting = ui.config('ui', 'portablefilenames')
    lowered = setting.lower()
    parsed = stringutil.parsebool(setting)
    # Windows always aborts; elsewhere only when explicitly requested.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    if parsed is None and not (abort or warn or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % setting)
    return abort, warn
322 322
class casecollisionauditor(object):
    '''Warn or abort when a new filename differs only in case from a
    tracked one (a collision on case-insensitive filesystems).
    '''
    def __init__(self, ui, abort, dirstate):
        # abort: if True, raise error.Abort on collision instead of warning
        self._ui = ui
        self._abort = abort
        # Lower-case all tracked names in one encoding.lower() call by
        # joining/splitting on '\0' (cheaper than per-file lowering).
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # f: repo-relative filename being added to tracking
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # A name already tracked under this exact spelling is not a
        # collision; only a case-variant of a *different* file is.
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
346 346
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    relevant = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not relevant:
        return None
    digest = hashlib.sha1()
    for rev in relevant:
        digest.update('%d;' % rev)
    return digest.digest()
370 370
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only errors on the root path abort the walk; deeper errors are
        # silently skipped by os.walk.
        if err.filename == path:
            raise err
    # samestat compares (device, inode) pairs and is used to detect
    # symlink cycles; without it symlinks cannot be followed safely.
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat; return False if an equivalent
            # directory was already visited (cycle guard).
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        # Sort for deterministic traversal order.
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            # os.walk does not follow symlinked directories; do it by hand,
            # recursing with the shared seen_dirs cycle guard.
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
414 414
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is not None:
        return node
    # Working-directory contexts have no real node; use the sentinel.
    return wdirid
421 421
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is not None:
        return rev
    # Working-directory contexts have no real rev; use the sentinel.
    return wdirrev
429 429
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
435 435
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Full 40-digit hash in debug mode, short hash otherwise.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
443 443
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex nodeid prefix to a binary node.

    Returns None when nothing matches. Re-raises AmbiguousPrefixLookupError
    when several nodes match and the configured disambiguation revset (if
    any) does not narrow the candidates to exactly one.
    """
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        # A leading 'x' explicitly marks the remainder as a hex nodeid
        # (see shortesthexnodeidprefix's disambiguate()); strip it.
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
472 472
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number

    True when prefix parses as a non-negative base-10 integer that is a
    valid revision in repo and has no leading zero (revision numbers are
    never printed with one).
    """
    try:
        # Pass the base explicitly: int(b'10') raises TypeError on
        # Python 3, but int(b'10', 10) accepts both bytes and str with
        # the same base-10 semantics, so a bytes prefix no longer
        # crashes past the ValueError handler below.
        i = int(prefix, 10)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev
        if prefix[0:1] == b'0' or i >= len(repo):
            return False
        return True
    except ValueError:
        return False
485 485
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # With prefixhexnode enabled, an 'x' marker resolves the
            # ambiguity instead of lengthening the prefix.
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        # Otherwise extend the prefix until it can no longer be read as
        # a revision number.
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        # Prefer disambiguating within the configured revset only, using
        # the native nodetree when available (cached across calls).
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # Pure-Python fallback: grow the prefix until it matches only
            # one node inside the revset.
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
554 554
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
566 566
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # Fast path for the common literal symbols.
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # 1) A (possibly negative) decimal revision number.
        try:
            r = int(symbol)
            # Reject forms like '012' or '+1' that int() would accept but
            # are not canonical revnum spellings.
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # 2) A full 40-digit hex nodeid.
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # 4) An abbreviated hex nodeid prefix.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # Translate filtered-lookup failures into a user-oriented message.
        raise _filterederror(repo, symbol)
627 627
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Check if the changeset is obsolete
    ctx = revsymbol(repo.unfiltered(), changeid)

    # If the changeset is obsolete, enrich the message with the reason
    # that made this changeset not visible
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    return error.FilteredRepoLookupError(
        msg, hint=_('use --hidden to access hidden revisions'))
652 652
def revsingle(repo, revspec, default='.', localalias=None):
    '''Resolve a single revset to a context; empty spec means default.'''
    # Note: revspec of 0 is a valid revision (not "no spec").
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
661 661
def _pairspec(revspec):
    '''Report whether revspec's top-level operator is a range expression.'''
    parsed = revsetlang.parse(revspec)
    return parsed and parsed[0] in ('range', 'rangepre', 'rangepost',
                                    'rangeall')
665 665
def revpair(repo, revs):
    """Resolve user-supplied revsets into a pair of contexts.

    With no revs, returns (working directory parent, working directory).
    Otherwise returns the endpoints of the union of the revsets; a single
    non-range spec pairs the revision with the working directory.
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # Pick the endpoints cheaply when the smartset's order is known.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # With multiple specs collapsing to one rev, make sure none of them
    # was individually empty (e.g. 'x..y' where one side matched nothing).
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
695 695
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Bare integers are wrapped in rev() so the revset engine accepts them.
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
723 723
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # In debug mode always show both slots, padding with null.
        return [parents[0], repo[nullrev]]
    if parents[0].rev() < intrev(ctx) - 1:
        return parents
    return []
739 739
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # Explicitly-kinded patterns (e.g. 're:') pass through as-is.
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # No match: keep the original pattern untouched.
            expanded.append(kindpat)
    return expanded
758 758
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: closes over 'm', which is only assigned below; this is safe
        # because badfn cannot fire before ctx.match() returns.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # A matcher that matches everything means no effective patterns.
    if m.always():
        pats = []
    return m, pats
783 783
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    matcher, _discarded = matchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
    return matcher
788 788
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
792 792
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
796 796
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    # A plain path (no pattern kind) just needs canonicalization.
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    # followlines requires exactly one file.
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
810 810
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        # (a file occupying the name of one of its parent directories),
        # walking from the deepest parent outwards and removing at most
        # one conflicting entry.
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # A directory (but not a symlink) at the backup path itself also
    # conflicts with writing the backup file there.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
846 846
847 847 class _containsnode(object):
848 848 """proxy __contains__(node) to container.__contains__ which accepts revs"""
849 849
850 850 def __init__(self, repo, revcontainer):
851 851 self._torev = repo.changelog.rev
852 852 self._revcontains = revcontainer.__contains__
853 853
854 854 def __contains__(self, node):
855 855 return self._revcontains(self._torev(node))
856 856
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.

    When fixphase is True, the phases of the new nodes are recomputed from
    their predecessors (or from targetphase when given); targetphase must
    only be passed together with fixphase.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        # a bare iterable of nodes means "replaced with nothing" (stripped)
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                # caller-provided move wins over the computed one
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards to the closest surviving ancestor
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # map each new node to the old nodes it replaces
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        # process in rev order so a node's parents already have their new
        # phase recorded in newphases by the time we reach it
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            # no obsolescence support: fall back to stripping the old nodes
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
998 998
def addremove(repo, matcher, prefix, opts=None):
    """Schedule new files for tracking and missing files for removal.

    Walks the dirstate with 'matcher' (recursing into subrepos when
    requested or matched), adds unknown files, forgets deleted ones and
    records likely renames when a similarity threshold is given.

    Returns 1 when any matched path was rejected or a subrepo reported a
    problem, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        # opts value may be a string from the command line
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        # recurse when --subrepos was given, or the subrepo path itself (or
        # something inside it) is matched
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    # report what will be added/removed (unknown+forgotten -> added,
    # deleted -> removed)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
                label = 'addremove.added'
            else:
                status = _('removing %s\n') % m.uipath(abs)
                label = 'addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # any explicitly-requested file that was rejected makes the whole
    # operation fail
    for f in rejected:
        if f in m.files():
            return 1
    return ret
1060 1060
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # Bind 'rejected' before creating the badfn closure that appends to it.
    # The original code built the matcher first, so the lambda referenced a
    # name that did not exist yet and would have raised NameError if the
    # callback ever fired during matcher construction.
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # report additions (unknown/forgotten files) and removals (deleted)
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # fail if any explicitly-requested file was rejected by the matcher
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1089 1089
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of repo-relative paths:
    (added, unknown, deleted, removed, forgotten).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    # restrict the walk to the narrowspec, but keep explicitly-listed files
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # dirstate states: '?' untracked, 'r' removed, 'a' added; st is the
    # stat result (falsy when the file is absent from disk)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1118 1119
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity detection is off.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        quiet = (not repo.ui.verbose and matcher.exact(old)
                 and matcher.exact(new))
        if not quiet:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1133 1134
1134 1135 def _markchanges(repo, unknown, deleted, renames):
1135 1136 '''Marks the files in unknown as added, the files in deleted as removed,
1136 1137 and the files in renames as copied.'''
1137 1138 wctx = repo[None]
1138 1139 with repo.wlock():
1139 1140 wctx.forget(deleted)
1140 1141 wctx.add(unknown)
1141 1142 for new, old in renames.iteritems():
1142 1143 wctx.copy(old, new)
1143 1144
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain back to its origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm' = merged, 'n' = normal; anything else needs a lookup refresh
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added in the working copy; copy data would be
            # meaningless, so just add the destination
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1162 1163
def writerequires(opener, requirements):
    """Write the repo requirements to the 'requires' file, sorted, one
    requirement per line."""
    with opener('requires', 'w') as fp:
        for requirement in sorted(requirements):
            fp.write("%s\n" % requirement)
1167 1168
class filecachesubentry(object):
    """Track the stat state of one file backing a cached value."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once determined, None while unknown
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the file so later changed() calls compare against now."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat-based caching is usable; optimistic when unknown."""
        if self._cacheable is None:
            # we don't know yet, assume it is for now
            return True
        return self._cacheable

    def changed(self):
        """Return True when the file may differ from the cached state."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if the file is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1222 1223
class filecacheentry(object):
    """Aggregate stat tracking for the set of files backing a cached value."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1239 1240
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # invoked when used as a decorator: remember the wrapped function
        # and its name (both native-str and bytes forms)
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # obj.__dict__ holds the memoized value; since this is a non-data
        # descriptor for reads, a hit here means no stat() at all
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # backing file changed on disk: recompute
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)
1330 1331
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record: "<revspec>[ <value>]"
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child process / close the stream, even on error
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1387 1388
1388 1389 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1389 1390 if lock is None:
1390 1391 raise error.LockInheritanceContractViolation(
1391 1392 'lock can only be inherited while held')
1392 1393 if environ is None:
1393 1394 environ = {}
1394 1395 with lock.inherit() as locker:
1395 1396 environ[envvar] = locker
1396 1397 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1397 1398
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1406 1407
class progress(object):
    """Context-manager style wrapper around ui.progress reporting."""

    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.topic = topic
        self.unit = unit
        self.total = total
        self.pos = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # always clear the progress bar, even when an exception escaped
        self.complete()

    def update(self, pos, item="", total=None):
        """Move the bar to ``pos``, optionally updating the total."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        """Advance the bar by ``step`` positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        # a None position tells the ui to clear the topic
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit, self.total)
1437 1438
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    if ui.configbool('format', 'usegeneraldelta'):
        return True
    # sparse-revlog implies general delta
    return ui.configbool('format', 'sparse-revlog')
1445 1446
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1451 1452
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): the 'keys' parameter is accepted but never used;
        # kept for interface compatibility with existing callers.
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                # the reserved key must never appear as a regular entry
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes dict() raise ValueError
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        # atomictemp ensures readers never observe a partially-written file
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1520 1521
# transaction names for which a summary of newly-obsoleted changesets is
# reported when the transaction closes (see registersummarycallback)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction names for which the range of incoming changesets is reported
# when the transaction closes (see registersummarycallback)
_reportnewcssource = [
    'pull',
    'unbundle',
]
1533 1534
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1546 1547
# a list of (repo, revs, match) prefetch functions, invoked by
# prefetchfiles() above
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1552 1553
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on 'txnname', installs post-close callbacks on transaction
    'otr' that report obsoleted changesets, new instabilities, incoming
    changeset ranges and phase changes.
    """
    def txmatch(sources):
        # does this transaction name belong to one of the given sources?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # category names are numbered so callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count visible instabilities of each type
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot counts now so the callback can report only the delta
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            # only count pre-existing changesets that became public
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1681 1682
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        # nothing new to warn about
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1689 1690
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Render nodes as short hashes, eliding everything past maxnumnodes
    unless the ui is verbose."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1695 1696
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) < 2:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1710 1711
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # identity by default; extensions wrap this function to alter the sink
    return sink
1716 1717
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # direct access must be explicitly enabled and the repo actually filtered
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # collect hash-like symbols (hashes/revnums) from the revset specs
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disbale these cache when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1759 1760
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    # honoring plain integer revnums is itself a separate opt-in knob
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # First interpretation: a plain revision number.
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    # revnum access disabled: drop this symbol entirely
                    continue
                else:
                    # "hidden" == present in the unfiltered changelog but
                    # absent from the filtered (visible) one
                    if n not in cl:
                        revs.add(n)
                    continue
            # n > tiprev: not a valid revnum — fall through, since an
            # all-digit string may still be a hash prefix
        except ValueError:
            # not an integer: fall through to hash-prefix resolution
            pass

        # Second interpretation: a (possibly abbreviated) hex node id,
        # resolved against the unfiltered repo so hidden nodes match.
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            # only record revisions the visible changelog actually hides
            if rev not in cl:
                revs.add(rev)

    return revs
1793 1794
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    revspec = ("ancestors(bookmark(%s)) - "
               "ancestors(head() and not bookmark(%s)) - "
               "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(revspec, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now