##// END OF EJS Templates
changeset_printer: move _meaningful_parentrevs() to scmutil...
Yuya Nishihara -
r26433:3ad41638 default
parent child Browse files
Show More
@@ -1,3364 +1,3348 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, bin, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import encoding
17 17 import formatter
18 18 import crecord as crecordmod
19 19 import lock as lockmod
20 20
def ishunk(x):
    """Return True when *x* is an interactive-record hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
24 24
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose hunks in *chunks* create a new
    file and were not already present in *originalchunks*."""
    return set(chunk.header.filename()
               for chunk in chunks
               if ishunk(chunk) and chunk.header.isnewfile()
               and chunk not in originalchunks)
32 32
def parsealiases(cmd):
    """Split a command-table key such as "^log|history" into its aliases."""
    stripped = cmd.lstrip("^")
    return stripped.split("|")
35 35
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it later.
    """
    oldwrite = ui.write

    def wrapped(*args, **kwargs):
        label = kwargs.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    ui.write = wrapped
    return oldwrite
48 48
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user filter *originalhunks*, via curses when enabled."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        chooser = crecordmod.testdecorator(testfile,
                                           crecordmod.testchunkselector)
    else:
        chooser = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chooser, operation)
61 61
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter *originalhunks*; return the kept hunks.

    *operation* is a translated string used for ui purposes to tell the
    user what kind of filtering they are doing: reverting, commiting,
    shelving, etc.
    """
    usecurses = ui.configbool('experimental', 'crecord', False)
    testfile = ui.config('experimental', 'crecordtest', None)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            operation)
    finally:
        ui.write = oldwrite
78 78
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via *commitfunc*.

    *filterfn* is called with (ui, originalchunks) and returns the hunks
    the user selected.  *backupall* backs up every changed file rather
    than only the files touched interactively.  Aborts when the ui is
    not interactive, suggesting *cmdsuggest* when given.
    """
    import merge as mergemod

    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise util.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        merge = len(repo[None].parents()) > 1
        if merge:
            raise util.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        # diff the working directory in git mode so the filtered patch
        # round-trips through patch.parsepatch/internalpatch below
        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, so we have intending-to apply subset of it
        try:
            chunks = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                # headers expose files(); plain hunk objects do not
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            # build the patch containing only the selected hunks
            fp = cStringIO.StringIO()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # remove newly added (and modified) files from the working
            # directory before reverting; they have backups above
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                choices = lambda key: key in backups
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, choices)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise util.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    util.copyfile(tmpname, repo.wjoin(realname))
                    # Our calls to copystat() here and above are a
                    # hack to trick any editors that have f open that
                    # we haven't modified them.
                    #
                    # Also note that this racy as an editor could
                    # notice the file's mtime before we've finished
                    # writing it.
                    shutil.copystat(tmpname, repo.wjoin(realname))
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # hold the working-directory lock for the whole record operation
        wlock = repo.wlock()
        try:
            return recordfunc(ui, repo, message, match, opts)
        finally:
            wlock.release()

    return commit(ui, repo, recordinwlock, pats, opts)
242 242
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            prefixed = [a for a in aliases if a.startswith(cmd)]
            if prefixed:
                found = prefixed[0]
        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
280 280
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # several commands match the prefix: refuse to guess
        raise error.AmbiguousCommand(cmd, sorted(choice.keys()))

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
297 297
def findrepo(p):
    """Walk up from directory *p* looking for a ".hg" directory.

    Returns the repository root, or None once the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            return None
        p = parent
    return p
305 305
def bailifchanged(repo, merge=True):
    """Abort when the working directory has uncommitted changes.

    With merge=True an uncommitted merge also aborts.  Subrepos are
    checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed, deleted
    if any(repo.status()[:4]):
        raise util.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged()
315 315
def logmessage(ui, opts):
    """Get the commit message from the -m/--message and -l/--logfile options."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if logfile and not message:
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
334 334
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
351 351
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return the actual text to be
    stored into history, which allows changing the description before
    storing it.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    are added automatically.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def editor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return editor
    if editform:
        def editor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return editor
    return commiteditor
382 382
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
396 396
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in the output filename pattern *pat*.

    Recognized escapes (each only when its input is supplied): %% literal
    percent, %b basename of repo root, %H/%h/%R/%r/%m node-derived values,
    %N/%n total and zero-padded sequence number, %s/%d/%p pathname parts.
    An unrecognized escape raises Abort.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    if node:
        expander.update({
            'H': lambda: hex(node),
            'R': lambda: str(repo.changelog.rev(node)),
            'h': lambda: short(node),
            'm': lambda: re.sub('[^\w]', '_', str(desc)),
            'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0),
        })
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # pad the sequence number to the width of the total
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    try:
        pieces = []
        i = 0
        patlen = len(pat)
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = expander[pat[i]]()
            pieces.append(c)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
442 442
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return an open file object for output pattern *pat*.

    An empty pattern or '-' means stdout/stdin (depending on *mode*);
    an object with a suitable write()/read() method is returned as-is;
    otherwise the pattern is expanded via makefilename() and opened.
    *modemap* maps filename -> open mode and is updated so subsequent
    writes to the same file append instead of truncating.
    """

    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        # use the ui's stdio streams
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            # duplicate the fd so closing the result won't close stdio
            return os.fdopen(os.dup(fp.fileno()), mode)
        else:
            # if this fp can't be duped properly, return
            # a dummy object that can be closed
            class wrappedfileobj(object):
                noop = lambda x: None
                def __init__(self, f):
                    self.f = f
                def __getattr__(self, attr):
                    if attr == 'close':
                        return self.noop
                    else:
                        return getattr(self.f, attr)

            return wrappedfileobj(fp)
    # a file-like object was passed directly
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first write truncates; later writes to the same file append
            modemap[fn] = 'ab'
    return open(fn, mode)
480 480
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    *opts* selects the log via the 'changelog', 'manifest' and 'dir'
    keys; otherwise *file_* names a filelog (or, without a repo, a
    revlog file on disk).  Raises Abort/CommandError on conflicting or
    insufficient arguments.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    # validate mutually-exclusive option combinations first
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise util.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.dirlog(file_)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        # fall back to opening a revlog file directly from disk
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise util.Abort(_("revlog '%s' not found") % file_)
        # replace the two-character extension with ".i" (the index file)
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
525 525
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or move, when *rename* is True) files matching *pats*.

    Returns True when at least one copy failed, False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unmanaged/removed files named exactly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go via a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist at dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    # last pattern is the destination
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
753 753
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon, re-execs itself detached (the child signals readiness
    by removing a lock file) and then returns parentfn(pid) in the
    parent.  In the child (--daemon-pipefds) or in foreground mode, runs
    initfn() and runfn(), optionally writing the pid file and
    redirecting stdio to *logfile* or the null device.
    '''

    def writepid(pid):
        # record the service pid when --pid-file was requested
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_pipefds']:
        # parent side: spawn a detached child and wait for it to start
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    if opts['daemon_pipefds']:
        # child side: detach from the terminal, signal the parent by
        # removing the lock file, and redirect stdio
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
833 833
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
        (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
        updatefunc(<repo>, <node>)

    Returns a (message, node, rejects) tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    import context
    tmpname, message, user, date, branch, nodeid, p1, p2 = \
        patch.extract(ui, hunk)

    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found nothing to apply
        return (None, None, False)
    msg = _('applied to working directory')

    rejects = False
    dsguard = None

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise util.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply to the working directory, guarded so dirstate is
            # rolled back on failure
            dsguard = dirstateguard(repo, 'tryimportone')
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise util.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                try:
                    if partial:
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, opts.get('user') or user,
                                    opts.get('date') or date, match=m,
                                    editor=editor)
                finally:
                    repo.ui.restoreconfig(allowemptyback)
            dsguard.close()
        else:
            # --bypass: commit via an in-memory context, leaving the
            # working directory untouched
            if opts.get('exact') or opts.get('import_branch'):
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise util.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            opts.get('user') or user,
                                            opts.get('date') or date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and opts.get('no_commit'):
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise util.Abort(_('patch is damaged or loses information'))
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        lockmod.release(dsguard)
        os.unlink(tmpname)
993 993
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    repo - repository holding the changesets
    revs - revisions to export, one patch per revision
    template - filename template used when no fp is given; an empty
               template routes output to the ui
    fp - optional file object receiving every patch
    switch_parent - diff merges against their second parent instead of
                    the first
    opts - diff options forwarded to patch.diffui()
    match - matcher restricting the files included in each patch
    '''

    total = len(revs)
    # Guard against an empty revision list: max() over an empty sequence
    # raises ValueError, while exporting nothing should be a no-op.
    revwidth = max([len(str(rev)) for rev in revs] or [0])
    filemode = {}

    def single(rev, seqno, fp):
        # Emit one patch; open an output file from the template if no
        # stream was handed in.
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        # Standard "HG changeset patch" header block.
        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1054 1054
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.'''
    # Route output either to the ui or to the supplied file object.
    if fp is None:
        emit = ui.write
    else:
        def emit(s, **kw):
            fp.write(s)

    relroot = pathutil.canonpath(repo.root, repo.getcwd(), root) if root else ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # Warn about match patterns that fall outside the relative root.
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # Diffstat wants zero context lines and the terminal width.
        diffopts = diffopts.copy(context=0)
        width = ui.termwidth() if not ui.plain() else 80
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            emit(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            emit(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1112 1112
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> rendered header / hunk text, filled while buffering
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        """Write out buffered output for ctx; return 1 if a hunk was
        written, 0 otherwise."""
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # only emit a header when it differs from the previous one
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Render ctx, either directly to the ui or into self.hunk when
        buffering is enabled."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # (modified, added, removed) relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        """Write the diff and/or diffstat for node against its first
        parent, according to self.diffopts."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # tracks whether the opening "[" of the JSON array is still pending
        self._first = True

    def close(self):
        """Terminate the JSON array ("[]" if nothing was emitted)."""
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory context has rev None; encode it as JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n "rev": %s' % jrev)
            self.ui.write(',\n "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %s' % jrev)
        self.ui.write(',\n "node": %s' % jnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n "manifest": %s' % jmanifestnode)

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # (modified, added, removed) status against the first parent
            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            # capture the diff/diffstat output and embed it as a JSON string
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1395 1379
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full node in debug mode, otherwise the usual 12-char short form
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            self.t.cache['changeset'] = tmpl

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        # map of logical part -> template name; later (more specific) modes
        # in tmplmodes override earlier ones when a matching template exists
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        """Append the document footer (if any) before the base close()."""
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behavior depends on ui trace level which
        # causes unexpected behaviors at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behavior cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()),
                        ('node', p.hex()),
                        ('phase', p.phasestr())]
                       for p in scmutil.meaningfulparents(self.repo, ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        try:
            # write header
            if self._parts['header']:
                h = templater.stringify(self.t(self._parts['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = self._parts['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if self._parts['footer']:
                if not self.footer:
                    self.footer = templater.stringify(
                        self.t(self._parts['footer'], **props))
        except KeyError as inst:
            # a template referenced an undefined keyword/part
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError as inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1496 1480
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.
    """

    # Neither given explicitly: consult the ui configuration, where a
    # configured logtemplate wins over a configured style.
    if not tmpl and not style:
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.unquotestring(tmpl)
            except SyntaxError:
                pass
            return tmpl, None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        # A bare name (no directory part) refers to a shipped style map
        # file; prefer the "map-cmdline." variant on the template path.
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return None, mapfile

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1527 1511
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when diffs/diffstats are requested
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    try:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    except SyntaxError as inst:
        raise util.Abort(inst.args[0])
1558 1542
def showmarker(ui, marker):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    # precursor node first, then each successor separated by a space
    ui.write(hex(marker.precnode()))
    for succ in marker.succnodes():
        ui.write(' ')
        ui.write(hex(succ))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # metadata sorted by key, minus the date which was shown above
    meta = sorted(marker.metadata().items())
    ui.write('{%s}' % (', '.join('%r: %r' % item for item in meta
                                 if item[0] != 'date')))
    ui.write('\n')
1576 1560
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}

    def prep(ctx, fns):
        # record the date of every changeset matching the spec
        when = ctx.date()
        if datematch(when[0]):
            results[ctx.rev()] = when

    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in results:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(results[rev])))
        return str(rev)

    raise util.Abort(_("revision matching date not found"))
1597 1581
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield geometrically growing window sizes.

    Starts at windowsize and doubles each step while below sizelimit;
    once the limit is reached the last value repeats forever.
    """
    size = windowsize
    # grow geometrically while under the cap
    while size < sizelimit:
        yield size
        size *= 2
    # cap reached (or started at/above it): repeat indefinitely
    while True:
        yield size
1603 1587
class FileWalkError(Exception):
    """Raised when the file history can't be walked using filelogs alone,
    signalling the caller to fall back to a changelog-based walk."""
    pass
1606 1590
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    # rename sources discovered while following; presumably
    # (filename, filenode) pairs from filelog.renamed() -- TODO confirm
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for every matched file,
        # then any rename sources accumulated in copies
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        # last filelog revision to consider for this file
        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
            # XXX insert 1327 fix here
            if flparentlinkrevs:
                ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1703 1687
class _followfilter(object):
    """Stateful filter for --follow style walks.

    The first revision passed to match() seeds the walk (startrev);
    subsequent calls report whether a revision is connected to it:
    revisions above startrev match when a parent is already in the
    accumulated root set (descendants), revisions below match when they
    are in the set of pending parents (ancestors). With onlyfirst set,
    only first parents are considered.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parent revisions, optionally restricted to the first parent,
            # with nullrev filtered out
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first call: remember the starting revision, which always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1741 1725
1742 1726 def walkchangerevs(repo, match, opts, prepare):
1743 1727 '''Iterate over files and the revs in which they changed.
1744 1728
1745 1729 Callers most commonly need to iterate backwards over the history
1746 1730 in which they are interested. Doing so has awful (quadratic-looking)
1747 1731 performance, so we use iterators in a "windowed" way.
1748 1732
1749 1733 We walk a window of revisions in the desired order. Within the
1750 1734 window, we first walk forwards to gather data, then in the desired
1751 1735 order (usually backwards) to display it.
1752 1736
1753 1737 This function returns an iterator yielding contexts. Before
1754 1738 yielding each context, the iterator will first call the prepare
1755 1739 function on each context in the window in forward order.'''
1756 1740
1757 1741 follow = opts.get('follow') or opts.get('follow_first')
1758 1742 revs = _logrevs(repo, opts)
1759 1743 if not revs:
1760 1744 return []
1761 1745 wanted = set()
1762 1746 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1763 1747 opts.get('removed'))
1764 1748 fncache = {}
1765 1749 change = repo.changectx
1766 1750
1767 1751 # First step is to fill wanted, the set of revisions that we want to yield.
1768 1752 # When it does not induce extra cost, we also fill fncache for revisions in
1769 1753 # wanted: a cache of filenames that were changed (ctx.files()) and that
1770 1754 # match the file filtering conditions.
1771 1755
1772 1756 if match.always():
1773 1757 # No files, no patterns. Display all revs.
1774 1758 wanted = revs
1775 1759 elif not slowpath:
1776 1760 # We only have to read through the filelog to find wanted revisions
1777 1761
1778 1762 try:
1779 1763 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1780 1764 except FileWalkError:
1781 1765 slowpath = True
1782 1766
1783 1767 # We decided to fall back to the slowpath because at least one
1784 1768 # of the paths was not a file. Check to see if at least one of them
1785 1769 # existed in history, otherwise simply return
1786 1770 for path in match.files():
1787 1771 if path == '.' or path in repo.store:
1788 1772 break
1789 1773 else:
1790 1774 return []
1791 1775
1792 1776 if slowpath:
1793 1777 # We have to read the changelog to match filenames against
1794 1778 # changed files
1795 1779
1796 1780 if follow:
1797 1781 raise util.Abort(_('can only follow copies/renames for explicit '
1798 1782 'filenames'))
1799 1783
1800 1784 # The slow path checks files modified in every changeset.
1801 1785 # This is really slow on large repos, so compute the set lazily.
1802 1786 class lazywantedset(object):
1803 1787 def __init__(self):
1804 1788 self.set = set()
1805 1789 self.revs = set(revs)
1806 1790
1807 1791 # No need to worry about locality here because it will be accessed
1808 1792 # in the same order as the increasing window below.
1809 1793 def __contains__(self, value):
1810 1794 if value in self.set:
1811 1795 return True
1812 1796 elif not value in self.revs:
1813 1797 return False
1814 1798 else:
1815 1799 self.revs.discard(value)
1816 1800 ctx = change(value)
1817 1801 matches = filter(match, ctx.files())
1818 1802 if matches:
1819 1803 fncache[value] = matches
1820 1804 self.set.add(value)
1821 1805 return True
1822 1806 return False
1823 1807
1824 1808 def discard(self, value):
1825 1809 self.revs.discard(value)
1826 1810 self.set.discard(value)
1827 1811
1828 1812 wanted = lazywantedset()
1829 1813
1830 1814 # it might be worthwhile to do this in the iterator if the rev range
1831 1815 # is descending and the prune args are all within that range
1832 1816 for rev in opts.get('prune', ()):
1833 1817 rev = repo[rev].rev()
1834 1818 ff = _followfilter(repo)
1835 1819 stop = min(revs[0], revs[-1])
1836 1820 for x in xrange(rev, stop - 1, -1):
1837 1821 if ff.match(x):
1838 1822 wanted = wanted - [x]
1839 1823
1840 1824 # Now that wanted is correctly initialized, we can iterate over the
1841 1825 # revision range, yielding only revisions in wanted.
1842 1826 def iterate():
1843 1827 if follow and match.always():
1844 1828 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1845 1829 def want(rev):
1846 1830 return ff.match(rev) and rev in wanted
1847 1831 else:
1848 1832 def want(rev):
1849 1833 return rev in wanted
1850 1834
1851 1835 it = iter(revs)
1852 1836 stopiteration = False
1853 1837 for windowsize in increasingwindows():
1854 1838 nrevs = []
1855 1839 for i in xrange(windowsize):
1856 1840 rev = next(it, None)
1857 1841 if rev is None:
1858 1842 stopiteration = True
1859 1843 break
1860 1844 elif want(rev):
1861 1845 nrevs.append(rev)
1862 1846 for rev in sorted(nrevs):
1863 1847 fns = fncache.get(rev)
1864 1848 ctx = change(rev)
1865 1849 if not fns:
1866 1850 def fns_generator():
1867 1851 for f in ctx.files():
1868 1852 if match(f):
1869 1853 yield f
1870 1854 fns = fns_generator()
1871 1855 prepare(ctx, fns)
1872 1856 for rev in nrevs:
1873 1857 yield change(rev)
1874 1858
1875 1859 if stopiteration:
1876 1860 break
1877 1861
1878 1862 return iterate()
1879 1863
def _makefollowlogfilematcher(repo, files, followfirst):
    """Return a rev -> matcher callable for --patch/--stat with --follow.

    The matcher produced for a revision selects the names (relative to the
    repo root) that the ancestors of *files* had at that revision.
    *followfirst* restricts the ancestor walk to first parents.
    """
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # Single-element list used as a mutable flag so the nested function can
    # rebind it (Python 2 has no 'nonlocal').
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # Map each linkrev to the set of file names the followed files had
        # there, starting from the working directory parent's filectx.
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1906 1890
1907 1891 def _makenofollowlogfilematcher(repo, pats, opts):
1908 1892 '''hook for extensions to override the filematcher for non-follow cases'''
1909 1893 return None
1910 1894
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option to (revset template, operator used to join a list of
    # values). Keys starting with '_' are internal pseudo-options added below.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)  # local copy: pseudo-options are inserted below
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = it.next()
    # Follow descendants when the revision range is ascending.
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Translate each active option into its revset fragment and AND them.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2062 2046
def _logrevs(repo, opts):
    """Return the default revision set for log, newest first."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    following = opts.get('follow') or opts.get('follow_first')
    userrevs = opts.get('rev')
    if userrevs:
        return scmutil.revrange(repo, userrevs)
    if following:
        if repo.dirstate.p1() == nullid:
            # Working directory parent is the null revision: nothing to show.
            return revset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = revset.spanset(repo)
    allrevs.reverse()
    return allrevs
2077 2061
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # Keep only the first 'limit' revisions (--limit).
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2114 2098
def getlogrevs(repo, pats, opts):
    """Compute the revisions 'hg log' should display.

    Returns a (revs, expr, filematcher) tuple: revs is an iterable of
    revision numbers, expr is the revset string derived from the log
    options and file patterns (or None) used to filter revs, and
    filematcher is None unless --stat or --patch was given, in which
    case it maps a revision number to a match object restricting the
    files detailed for that revision.
    """
    maxcount = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    userspecified = opts.get('rev')
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not userspecified:
            revs.reverse()
        matchrevs = revset.match(repo.ui, expr)
        # Matching may reorder revisions ("A or B" yields A's matches then
        # B's), so restore newest-first order afterwards.
        revs = matchrevs(repo, revs)
        if not userspecified:
            revs.sort(reverse=True)
    if maxcount is not None:
        # Honor --limit by keeping only the first 'maxcount' revisions.
        taken = []
        for pos, rev in enumerate(revs):
            if pos >= maxcount:
                break
            taken.append(rev)
        revs = revset.baseset(taken)

    return revs, expr, filematcher
2149 2133
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the revisions of *dag* as an ASCII graph through *displayer*.

    showparents: nodes to mark with '@' (working directory parents).
    edgefn: computes the graph edges for each row (e.g. graphmod.asciiedges).
    getrenamed: optional fn(filename, rev) used to report copies.
    filematcher: optional fn(rev) -> matcher limiting displayed files.
    """
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # Pick the node glyph: '@' working parent, 'x' obsolete,
        # '_' closes its branch, 'o' otherwise.
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # The displayer buffers output per revision; pop the buffered hunk
        # so it can be interleaved with the graph edges.
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2180 2164
def graphlog(ui, repo, *pats, **opts):
    """Show a revision graph; parameters are identical to the log command."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    dag = graphmod.dagwalker(repo, revs)

    renamefn = None
    if opts.get('copies'):
        # Bound copy tracing at the newest user-requested revision, if any.
        stoprev = None
        if opts.get('rev'):
            stoprev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        renamefn = templatekw.getrenamedfn(repo, endrev=stoprev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    parentnodes = [pctx.node() for pctx in repo[None].parents()]
    displaygraph(ui, dag, displayer, parentnodes, graphmod.asciiedges,
                 renamefn, filematcher)
2196 2180
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was supplied."""
    for flag in ["newest_first"]:
        if opts.get(flag):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % flag.replace("_", "-"))
2202 2186
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place, apply --limit, and wrap them for graphing."""
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is None:
        return graphmod.nodes(repo, nodes)
    return graphmod.nodes(repo, nodes[:maxcount])
2209 2193
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Add files matched by *match* to the dirstate (and to subrepos).

    prefix: path prefix for reporting names relative to a parent repo.
    explicitonly: only add files listed explicitly, not pattern matches.
    Returns the list of files that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # Collect names rejected by the matcher while still reporting them.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # Check for case collisions with already-tracked files.
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2252 2236
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match* without deleting them.

    prefix: path prefix for reporting names relative to a parent repo.
    explicitonly: restrict to files named explicitly, not pattern matches.
    Returns (bad, forgot): files that could not be forgotten, and those
    that were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # Collect names rejected by the matcher while still reporting them.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    # Candidates: modified, added, deleted and clean tracked files.
    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2300 2284
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of *ctx* matched by *m* through formatter *fm*.

    fmt: format string for each path; subrepos: also recurse into
    non-matching subrepos. Returns 0 if at least one file was listed,
    1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # In the working directory, skip files marked for removal.
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    # Hoisted out of the loop below: the predicate depends only on the
    # matcher and the subrepo path, so redefining it per iteration was
    # pointless loop-invariant work.
    def matchessubrepo(subpath):
        return (m.always() or m.exact(subpath)
                or any(f.startswith(subpath + '/') for f in m.files()))

    for subpath in sorted(ctx.substate):
        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2333 2317
def remove(ui, repo, m, prefix, after, force, subrepos):
    """Remove working directory files matched by matcher *m*.

    after: record the removal of already-deleted files only.
    force: remove even modified/added/clean files.
    subrepos: recurse into all subrepos, matching or not.
    Returns 0 on success, 1 if any file was skipped with a warning.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # Hoisted out of the loop below: the predicate is loop-invariant apart
    # from its arguments, so redefining it per iteration was wasted work.
    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    for subpath in sorted(wctx.substate):
        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # Hoisted out of the loop below and parameterized on the file name
    # instead of closing over the loop variable.
    def insubrepo(f):
        for subpath in wctx.substate:
            if f.startswith(subpath):
                return True
        return False

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo(f):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1

    # 'removelist' (renamed from 'list', which shadowed the builtin) holds
    # the files whose removal will actually be recorded.
    if force:
        removelist = modified + deleted + clean + added
    elif after:
        removelist = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        removelist = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(removelist):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in removelist:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(removelist)
    finally:
        wlock.release()

    return ret
2418 2402
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the data of files in *ctx* matched by *matcher*.

    Output goes to the file named by opts['output'] (via makefileobj) or
    stdout. Recurses into subrepos. Returns 0 if anything was written,
    1 otherwise.
    """
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        matcher.bad(path, msg)

    for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            # sub.cat returns non-zero on failure; success clears err.
            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2465 2449
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        status = scmutil.addremove(repo, matcher, "", opts)
        if status != 0:
            raise util.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2482 2466
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset *old*, folding in working-directory changes.

    A temporary changeset holding the working directory changes (if any)
    is committed first, then a replacement for *old* is created on top of
    old.p1() combining both. Bookmarks and the working directory parent
    move to the new changeset; the superseded changesets are obsoleted
    (when markers are enabled) or stripped. Returns the new node id.
    """
    # avoid cycle context -> subrepo -> cmdutil
    import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = dsguard = lock = newid = None
    try:
        wlock = repo.wlock()
        dsguard = dirstateguard(repo, 'amend')
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._activebookmark
            try:
                repo._activebookmark = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._activebookmark = activebookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # Fix: the old test 'if old.p2:' checked a bound method,
                # which is always truthy, so p2 copies were merged in even
                # for non-merge changesets. Only consult p2 when a second
                # parent actually exists.
                if old.p2().rev() != nullrev:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        dsguard.close()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, dsguard, wlock)
    return newid
2682 2666
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, launching an editor only when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform)
2687 2671
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    """Invoke the user's editor to obtain a commit message for ctx.

    A 'committemplate' configuration matching editform (most specific
    dotted form first, always falling back through 'changeset') is used to
    build the initial editor content; otherwise a plain skeleton is built
    by buildcommittext. Lines starting with 'HG:' are stripped from the
    result. finishdesc, if given, post-processes the edited text.

    Raises util.Abort if the final message is empty.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # try 'changeset.form.subform', then 'changeset.form', then 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root; restore the previous working
    # directory even if the editor invocation raises (e.g. user abort),
    # so callers are not left with a changed cwd
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                            editform=editform)
    finally:
        os.chdir(olddir)
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
2717 2701
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Expand the 'committemplate' template tmpl for ctx and return the
    rendered text used as initial commit-editor content."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        templater = changeset_templater(ui, repo, None, {}, tmpl, mapfile,
                                        False)
    except SyntaxError as err:
        raise util.Abort(err.args[0])

    # expose every other committemplate.* option to the template engine as
    # a named sub-template
    for key, value in repo.ui.configitems('committemplate'):
        if key != 'changeset':
            templater.t.cache[key] = value

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    ui.pushbuffer()
    templater.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2737 2721
def hgprefix(msg):
    """Prefix every non-empty line of msg with 'HG: ', dropping blank lines."""
    kept = [line for line in msg.split("\n") if line]
    return "\n".join("HG: %s" % line for line in kept)
2740 2724
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default plain-text skeleton shown in the commit editor."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()

    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        lines.append(hgprefix(_("subrepo %s") % s))
    for f in added:
        lines.append(hgprefix(_("added %s") % f))
    for f in modified:
        lines.append(hgprefix(_("changed %s") % f))
    for f in removed:
        lines.append(hgprefix(_("removed %s") % f))
    if not added and not modified and not removed:
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2768 2752
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print the post-commit status messages for the new changeset node:
    'created new head', 'reopening closed branch head' and, in verbose or
    debug mode, the committed changeset id."""
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    sameheads = [p for p in parents
                 if bheads and p.node() in bheads and p.branch() == branch]
    if (not opts.get('amend') and bheads and node not in bheads
        and not sameheads):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % p)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2816 2800
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert files matching pats to their state in changeset ctx.

    parents is the (p1, p2) pair of the working directory parents; pats
    and opts come from the revert command line. Files are classified into
    action sets (revert/add/remove/drop/forget/undelete/noop/unknown) by
    comparing working-copy status against both the target revision and
    the dirstate, then the actions are executed by _performrevert().
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily below, only for the merge case
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    wlock = repo.wlock()
    try:
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress match errors for paths that exist only on one
                # side (dirstate vs. target manifest) or inside subrepos
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may needs backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            if pmf is None:
                # only need parent manifest in the merge case,
                # so do not read by default
                pmf = repo[parent].manifest()
            mergeadd = dsmodified - set(pmf)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backup = 2 # unconditionally do backup
        check = 1 # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], backup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = "%s.orig" % rel
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            if interactive:
                                util.copyfile(target, bakname)
                            else:
                                util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                # first matching set wins; stop scanning the dispatch table
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise util.Abort("subrepository '%s' does not exist in %s!"
                                     % (sub, short(ctx.node())))
    finally:
        wlock.release()
3096 3080
3097 3081 def _revertprefetch(repo, ctx, *files):
3098 3082 """Let extension changing the storage layer prefetch content"""
3099 3083 pass
3100 3084
def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    actions maps action names ('forget', 'remove', 'drop', 'revert', 'add',
    'undelete', ...) to (filelist, message) pairs as built by revert().
    When interactive is True, the 'revert' file set is filtered through the
    record/crecord hunk selection UI before being applied as a patch.
    """
    parent, p2 = parents
    node = ctx.node()
    def checkout(f):
        # write f's content (and flags) from the target revision into the
        # working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # tolerate a file that is already gone from disk
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        # 'drop': forget in dirstate but leave whatever is on disk alone
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, {})
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        # the diff direction decides whether selected hunks must be
        # reversed before being applied to the working directory
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks = recordfilter(repo.ui, originalchunks)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise util.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        # Apply changes
        fp = cStringIO.StringIO()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise util.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    # re-establish copy records for everything restored from the target
    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3202 3186
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table (a dict) as its argument. The
    returned function can be used as a decorator for adding commands to
    that command table, and accepts multiple arguments to define a command:

    - name: the command name (with '|'-separated aliases, '^' prefix ok)
    - options: an iterable of tuples defining command arguments; see
      ``mercurial.fancyopts.fancyopts()`` for the format of each tuple
    - synopsis: a short, one line summary shown in the help output
    - norepo: whether the command does not require a local repository
      (default False, since most commands operate on a repository)
    - optionalrepo: whether the command only optionally requires a
      local repository
    - inferrepo: whether to examine the command line arguments for
      potential repository locations (see ``findrepo()``); if a
      repository is found, it will be used
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            if synopsis:
                table[name] = func, list(options), synopsis
            else:
                table[name] = func, list(options)

            if norepo or optionalrepo or inferrepo:
                # register aliases in the relevant repo-requirement lists
                aliases = ' %s' % ' '.join(parsealiases(name))
                if norepo:
                    # Avoid import cycle.
                    import commands
                    commands.norepo += aliases
                if optionalrepo:
                    import commands
                    commands.optionalrepo += aliases
                if inferrepo:
                    import commands
                    commands.inferrepo += aliases

            return func
        return decorator

    return cmd
3258 3242
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a (state file, clearable, allowcommit, error, hint) tuple,
# consumed by checkunfinished() and clearunfinished() below.
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3287 3271
def checkunfinished(repo, commit=False):
    """Abort if a multistep operation (like graft) is in progress.

    When commit is True, states flagged as allowing commit are skipped.
    It's probably good to check this right before bailifchanged().
    """
    for statefile, ignored, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise util.Abort(msg, hint=hint)
3298 3282
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable. Aborts first if any unclearable state exists,
    so nothing is removed in that case.
    """
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise util.Abort(msg, hint=hint)
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3309 3293
class dirstateguard(object):
    '''Restore dirstate at unexpected failure.

    At the construction, this class does:

    - write current ``repo.dirstate`` out, and
    - save ``.hg/dirstate`` into the backup file

    This restores ``.hg/dirstate`` from backup file, if ``release()``
    is invoked before ``close()``.

    This just removes the backup file at ``close()`` before ``release()``.
    '''

    def __init__(self, repo, name):
        # flush pending in-memory changes so the on-disk dirstate is
        # current before we copy it aside
        repo.dirstate.write()
        self._repo = repo
        # id(self) makes the backup filename unique per guard instance
        self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
        repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
        self._active = True
        self._closed = False

    def __del__(self):
        if self._active: # still active
            # this may occur, even if this class is used correctly:
            # for example, releasing other resources like transaction
            # may raise exception before ``dirstateguard.release`` in
            # ``release(tr, ....)``.
            self._abort()

    def close(self):
        # declare success: discard the backup without restoring it
        if not self._active: # already inactivated
            msg = (_("can't close already inactivated backup: %s")
                   % self._filename)
            raise util.Abort(msg)

        self._repo.vfs.unlink(self._filename)
        self._active = False
        self._closed = True

    def _abort(self):
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring to original status
        self._repo.dirstate.invalidate()

        self._repo.vfs.rename(self._filename, 'dirstate')
        self._active = False

    def release(self):
        # restore the saved dirstate unless close() already marked success
        if not self._closed:
            if not self._active: # already inactivated
                msg = (_("can't release already inactivated backup: %s")
                       % self._filename)
                raise util.Abort(msg)
            self._abort()
@@ -1,1134 +1,1150 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import wdirrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # fixed slot order; the properties below index into it
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''list of files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''list of files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''list of files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''list of files in the dirstate, but deleted from the working
        copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''list of files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''list of files not in the dirstate that are ignored (by
        _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''list of files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath, ctx) mapping, preferring subpaths from ctx1.
    # The subpaths from ctx2 matter when the .hgsub file has been
    # modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths present only in ctx2 are reported separately below
    missing = set(s for s in ctx2.substate if s not in ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in (excluded or []):
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
121 121
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not acceptable as a new label name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise util.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for banned in ('\r', '\n'):
        if banned in f:
            raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                             % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # portability problems are always fatal on Windows itself
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    '''Warn or abort when a new filename differs only in case from a
    file already known to the dirstate.'''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # abort=True raises util.Abort on collision; otherwise only warn
        self._abort = abort
        # lower-case every tracked name in one pass over a joined string
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        # record the name so later additions collide against it too
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189 189
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    # only revisions at or below maxrev participate in the key
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
213 213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    # Concrete subclasses provide __call__ (open a file) and join (map a
    # vfs-relative path to a real one); everything below is written in
    # terms of those two operations.

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only ENOENT is tolerated; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
            return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind 'open' to __call__ on first use so subsequent calls go
        # straight to __call__ without this wrapper
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        '''Return the full binary content of ``path``.'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        '''Return the lines of ``path`` as a list.'''
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        '''Replace the content of ``path`` with ``data`` (binary).'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence ``data`` to ``path``.'''
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''Append ``data`` to ``path``.'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under the vfs; return (fd, vfs-relative name).'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            # report the name relative to ``dir`` rather than absolute
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove itself
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
425 425
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        # installs either a real path auditor or util.always (see below)
        self._setmustaudit(audit)
        self.createmode = None
        # lazily decided in __call__: whether st_nlink can be trusted on
        # this filesystem
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode (minus exec bits beyond 0666) to a new file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks whether we may be breaking hardlinks:
        #   -1: unknown / read-only open; 0: file is (re)created;
        #   >1: hardlinked, copy-on-write needed
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink by replacing f with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # the file was newly created above: fix its permissions
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # filesystem without symlink support: store the target as the
            # file's content instead
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
545 545
# alias for the vfs class
opener = vfs
547 547
class auditvfs(object):
    '''Hold a wrapped vfs and proxy its ``mustaudit`` flag.'''

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff
559 559
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the path before delegating to the wrapped vfs
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
575 575
# alias for the filtervfs class
filteropener = filtervfs
577 577
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
591 591
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the top-level path abort the walk
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record the directory's stat identity; return True when it
            # was not seen before (guards against symlink cycles)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # samestat unavailable on this platform: cannot follow symlinks
        # safely, so disable it
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the symlink with cycle tracking
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
639 639
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        # pick up every *.rc shipped in default.d
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
652 652
# cache for rcpath(); None until the first call computes it
_rcpath = None
654 654
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # already computed on a previous call
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if not os.path.isdir(p):
            _rcpath.append(p)
        else:
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
    return _rcpath
678 678
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None stands for the working directory; map it to wdirrev
    return wdirrev if rev is None else rev
685 685
def revsingle(repo, revspec, default='.'):
    '''Resolve ``revspec`` to a single changectx (the last of the set).'''
    if not revspec and revspec != 0:
        # no spec given: fall back to the default revision
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
694 694
def _pairspec(revspec):
    # True when the top-level revset expression is a range form
    parsed = revset.parse(revspec)
    tree = revset.optimize(parsed, True)[1] # fix up "x^:y" -> "(x^):y"
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
699 699
def revpair(repo, revs):
    '''Resolve ``revs`` to a (node, node-or-None) pair.'''
    if not revs:
        # no spec: first parent of the working directory, no second rev
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints according to the smartset's ordering
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
726 726
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""
    # bare integers are turned into explicit rev() specs
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in revs]
    matcher = revset.matchany(repo.ui, allspecs, repo)
    return matcher(repo)
736 736
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a merge: both parents carry information
        return parents
    if repo.ui.debugflag:
        # debug output always shows two parent slots, padded with null
        return [parents[0], repo['null']]
    if parents[0].rev() < intrev(ctx.rev()) - 1:
        # first parent is not the immediately preceding revision
        return parents
    return []
752
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind: never glob-expand
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match on disk: keep the pattern verbatim
            ret.append(kindpat)
    return ret
755 771
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    if badfn is None:
        def badfn(f, msg):
            # ``m`` is bound below; the callback only fires afterwards
            ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
780 796
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default,
                                  badfn=badfn)
    return matcher
785 801
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
789 805
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
793 809
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, recursing into subrepos.

    Returns 1 if any explicitly-named file was rejected or a subrepo
    reported a failure, otherwise 0.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # True when the matcher names the subrepo itself or any file
        # whose path starts with the subrepo path
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    # handle matching subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only explicitly-named files count as rejections
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    # report what will be added/removed (unknown+forgotten -> adding,
    # deleted -> removing)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
857 873
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # note: ``rejected`` is referenced by the lambda before it is
    # assigned; that is fine because the callback only fires later
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # 1 when any of the given files could not be processed
    for f in rejected:
        if f in m.files():
            return 1
    return 0
886 902
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dstate is the dirstate letter ('?' untracked, 'r' removed,
        # 'a' added); st is the stat result, falsy when the file is
        # missing on disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
915 931
916 932 def _findrenames(repo, matcher, added, removed, similarity):
917 933 '''Find renames from removed files to added ones.'''
918 934 renames = {}
919 935 if similarity > 0:
920 936 for old, new, score in similar.findrenames(repo, added, removed,
921 937 similarity):
922 938 if (repo.ui.verbose or not matcher.exact(old)
923 939 or not matcher.exact(new)):
924 940 repo.ui.status(_('recording removal of %s as rename to %s '
925 941 '(%d%% similar)\n') %
926 942 (matcher.rel(old), matcher.rel(new),
927 943 score * 100))
928 944 renames[new] = old
929 945 return renames
930 946
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-directory lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
943 959
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # src itself is only added, so there is no committed copy source
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
962 978
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirements read; raises RequirementError when
    the file is corrupt or lists unsupported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty or non-alphanumeric-leading entry cannot be a real
        # requirement name; the file itself must be damaged
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
981 997
def writerequires(opener, requirements):
    '''Write the requirements, sorted and one per line, to .hg/requires.'''
    reqfile = opener("requires", "w")
    reqfile.write("".join("%s\n" % name for name in sorted(requirements)))
    reqfile.close()
987 1003
class filecachesubentry(object):
    '''Tracks the stat state of a single file so callers can ask
    whether it changed on disk since the last snapshot.'''

    def __init__(self, path, stat):
        # path: file to watch; stat: whether to take a snapshot now
        self.path = path
        self._cacheable = None  # None means we don't know yet
        self.cachestat = filecachesubentry.stat(path) if stat else None
        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # re-snapshot only when stat info is usable for caching
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # unknown cacheability is treated optimistically as cacheable
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # returns None when the file does not exist; re-raises other errors
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                return None
            raise
1042 1058
class filecacheentry(object):
    '''Aggregates filecachesubentry objects for a group of paths.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits on the first changed entry, like the
        # explicit loop it replaces
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1059 1075
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (under .hg/) of the files whose changes invalidate
        # the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and the
        # attribute name it will be cached under
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # fast path: a value already in obj.__dict__ is trusted until it is
        # invalidated via __delete__; the invariant is that X in __dict__
        # implies X in _filecache
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # watched file(s) changed on disk: rebuild the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # assigning overrides the computed value but keeps the stat-tracking
        # entry in place (created without an initial stat when missing)
        if self.name not in obj.__dict__:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the fast-path copy in __dict__; the _filecache entry is
        # kept so the next __get__ can decide whether a reload is needed
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now