##// END OF EJS Templates
match: added matchessubrepo method to matcher...
Hannes Oldenburg -
r29758:2372182e default
parent child Browse files
Show More
@@ -1,3574 +1,3562 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import sys
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 )
24 24
25 25 from . import (
26 26 bookmarks,
27 27 changelog,
28 28 copies,
29 29 crecord as crecordmod,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 lock as lockmod,
35 35 match as matchmod,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 repair,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 templatekw,
45 45 templater,
46 46 util,
47 47 )
48 48 stringio = util.stringio
49 49
def ishunk(x):
    """Return True if x is a record hunk (curses or plain variant)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
53 53
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose hunks create new files and were
    not present in originalchunks (i.e. added during interactive editing)."""
    newfiles = set()
    for c in chunks:
        if not ishunk(c):
            continue
        if c.header.isnewfile() and c not in originalchunks:
            newfiles.add(c.header.filename())
    return newfiles
61 61
def parsealiases(cmd):
    """Split a command-table key into its list of aliases.

    Leading "^" characters (marking the command as important in help
    output) are discarded; aliases are separated by "|".
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
64 64
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it later.
    """
    def labeledwrite(orig, *args, **kw):
        baselabel = kw.pop('label', '')
        for chunk, chunklabel in patch.difflabel(lambda: args):
            orig(chunk, label=baselabel + chunklabel)

    oldwrite = ui.write

    def wrapped(*args, **kwargs):
        return labeledwrite(oldwrite, *args, **kwargs)

    setattr(ui, 'write', wrapped)
    return oldwrite
77 77
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Filter hunks through the curses UI when enabled, otherwise through
    the plain-text prompt in the patch module."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)
    if testfile:
        # test mode: drive the chunk selector from a scripted input file
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, recordfn)
90 90
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks; return (chunks, opts).

    *operation* is used to build ui messages indicating what kind of
    filtering is going on: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest', None)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            operation)
    finally:
        # always restore the unwrapped write, even if filtering aborted
        ui.write = oldwrite
107 107
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via commitfunc.

    cmdsuggest is the command name to suggest when the ui is not
    interactive. backupall requests backing up every changed file rather
    than only newly added/modified ones. filterfn filters the hunks
    (e.g. recordfilter).
    """
    from . import merge as mergemod
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # build a git-style diff with function context; dates would make
        # the generated backup patch noisy
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have no files(); only hunks contribute
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            # dopatch is nonzero iff at least one chunk was written
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"})
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                        False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; a leftover backup dir is harmless
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the working-directory lock for the whole record operation
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
288 288
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    matches = {}
    debugmatches = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif not strict:
            # accept any alias that cmd is a prefix of
            found = next((alias for alias in aliases
                          if alias.startswith(cmd)), None)
        else:
            found = None
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugmatches[found] = (aliases, table[entry])
            else:
                matches[found] = (aliases, table[entry])

    if not matches and debugmatches:
        matches = debugmatches

    return matches, allcmds
326 326
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    matches, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in matches:
        return matches[cmd]

    if len(matches) > 1:
        candidates = matches.keys()
        candidates.sort()
        raise error.AmbiguousCommand(cmd, candidates)

    if matches:
        return matches.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
343 343
def findrepo(p):
    """Walk upward from p looking for a directory containing ".hg".

    Returns the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point only at the filesystem root
            return None
        p = parent
351 351
def bailifchanged(repo, merge=True):
    """Abort when the working directory has uncommitted changes.

    With merge=True (the default) an uncommitted merge also aborts.
    Subrepositories are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    st = repo.status()
    # any of modified/added/removed/deleted being non-empty means dirty
    if any(st[:4]):
        raise error.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for subname in sorted(ctx.substate):
        ctx.sub(subname).bailifchanged()
361 361
def logmessage(ui, opts):
    """Get the log message according to the -m and -l options."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if message or not logfile:
        return message
    # no -m given: read the message from the logfile ('-' means stdin)
    try:
        if logfile == '-':
            return ui.fin.read()
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, inst.strerror))
380 380
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is being committed.

    Returns baseformname with '.merge' appended for a merge, otherwise
    with '.normal' appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
397 397
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Get the appropriate commit message editor according to '--edit'.

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing, but
    before the empty-ness check. It should return the actual text to be
    stored into history, which allows rewriting the description before
    storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line; the 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever one of
    'finishdesc' or 'extramsg' is specified, because those are specific
    to usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
428 428
def loglimit(opts):
    """Get the log limit according to option -l/--limit.

    Returns the limit as an int, or None when no (truthy) limit was
    given. Aborts on non-integer or non-positive values.
    """
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
442 442
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output filename pattern.

    Supported escapes: %% literal percent, %b basename of the repo root,
    and — when the corresponding argument is provided — %H/%h/%R/%r/%m
    (node forms), %N/%n (total/sequence number), %s/%d/%p (pathname
    forms). Aborts on an unknown escape.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    if node:
        expander.update({
            'H': lambda: hex(node),
            'R': lambda: str(repo.changelog.rev(node)),
            'h': lambda: short(node),
            'm': lambda: re.sub('[^\w]', '_', str(desc)),
            'r': lambda:
                str(repo.changelog.rev(node)).zfill(revwidth or 0),
        })
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # pad the sequence number to the width of the total
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    try:
        pieces = []
        i = 0
        patlen = len(pat)
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = expander[pat[i]]()
            pieces.append(c)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
488 488
class _unclosablefile(object):
    """Proxy for a file object whose close() is a no-op.

    Used to hand out streams (e.g. the ui's stdin/stdout) without
    letting callers close the underlying file.
    """

    def __init__(self, fp):
        # the wrapped file object; all attribute access is delegated to it
        self._fp = fp

    def close(self):
        # deliberately do nothing: the underlying stream must stay open
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # delegate everything else (read, write, tell, ...) to the file
        return getattr(self._fp, attr)
501 501
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open a file object for the expanded pattern 'pat'.

    An empty pattern or '-' maps to the ui's stdout (or stdin for read
    modes), wrapped so close() is a no-op. A file-like pat compatible
    with the requested mode is returned as-is. Otherwise the pattern is
    expanded via makefilename and opened; modemap, when given, can
    override the mode per filename (and flips 'wb' to 'ab' so repeated
    opens append).
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(fp)
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
524 524
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    Dispatches on the --changelog/--manifest/--dir options in 'opts';
    without them, 'file_' names either a tracked file (filelog) or a raw
    revlog index on disk. Raises CommandError/Abort on invalid option
    combinations.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    # validate mutually exclusive options before touching the repo
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.manifest.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # fall back to opening the revlog index file directly from disk
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
569 569
def copy(ui, repo, pats, opts, rename=False):
    """Copy or rename files; backs the 'hg copy' / 'hg rename' commands.

    Returns True when at least one copy failed, False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # collect (abs, rel, exact) source tuples matching pat, warning
        # about unmanaged or removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # returns True to report a failure; falsy return means success/skip
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                # case-only rename on a case-insensitive filesystem
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename via a temporary name so the case change takes
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # score: how many stripped sources already exist at dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
797 797
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    parentfn: when daemonizing, called in the parent with the child pid;
              its return value is returned.
    initfn:   called before pid handling and the service body.
    runfn:    the service body; its return value is returned.
    logfile:  when daemonized, stdout/stderr are redirected here.
    runargs:  argv used to re-spawn ourselves as the daemon child.
    appendpid: append to (rather than overwrite) the pid file.
    '''

    def writepid(pid):
        # record our pid when --pid-file was requested
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_postexec']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child removes the lock file once it has started up
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(util.getpid())

    if opts['daemon_postexec']:
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        for inst in opts['daemon_postexec']:
            if inst.startswith('unlink:'):
                lockpath = inst[7:]
                os.unlink(lockpath)
            elif inst.startswith('chdir:'):
                os.chdir(inst[6:])
            elif inst != 'none':
                raise error.Abort(_('invalid value for --daemon-postexec: %s')
                                  % inst)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # detach stdio: stdin from /dev/null, stdout/stderr to the logfile
        # (or /dev/null when no logfile was given)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
884 884
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' functions are run before the commit is made and are provided
# the following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided
# the following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
905 905
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
    (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
    updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple; (None, None, False) when the hunk contained no patch data.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    # Pull metadata out of the patch itself; explicit command-line options
    # (user/date) take precedence over what the patch header says.
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract() found no actual patch content
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        # Resolve the parents the new commit should have. With --exact the
        # parents recorded in the patch are authoritative.
        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                # recorded parents unknown locally; fall back to current ones
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # Apply to the working directory (possibly after updating to the
            # patch's recorded first parent) and commit from there.
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    # with --partial we keep going and report rejects instead
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                # give extensions a chance to contribute commit extras
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        # a fully-rejected partial import still creates a
                        # (possibly empty) commit to attach rejects to
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: build the commit in memory without touching the
            # working directory.
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1075 1075
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1083 1083
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    :repo: repository the revisions come from
    :revs: revisions to export, in output order
    :template: filename template used to create one output file per
               changeset when no file object is given (expanded by
               makefileobj)
    :fp: optional file object all patches are written to; when None,
         output goes to per-changeset files (if ``template`` is non-empty)
         or to the ui
    :switch_parent: diff against the second parent instead of the first
    :opts: diff options forwarded to patch.diffui
    :match: matcher restricting which files appear in the diffs

    Writes nothing and returns immediately when ``revs`` is empty.
    '''

    total = len(revs)
    if not total:
        # Nothing to export. Bail out early: the max() below would raise
        # ValueError on an empty sequence.
        return
    revwidth = max(len(str(rev)) for rev in revs)
    # per-file open-mode map handed to makefileobj (modemap); shared across
    # all single() calls so repeated writes to one file behave consistently
    filemode = {}

    def single(rev, seqno, fp):
        # emit one changeset: header lines, description, then the diff
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions contribute extra header lines
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1148 1148
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    :stat: emit a diffstat summary instead of the full diff
    :fp: optional file object to write to (defaults to ui.write)
    :root: when set, paths are reported relative to this repo-relative
           directory
    :listsubrepos: recurse into subrepositories present in either context
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # fp has no label support; drop labels on the floor
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need any context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1206 1206
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        # when True, show() renders into self.hunk keyed by rev instead of
        # writing directly; flush() later emits the buffered output
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        self.header = {}            # rev -> buffered header text
        self.hunk = {}              # rev -> buffered changeset text
        self.lastheader = None      # last header written, to avoid repeats
        self.footer = None

    def flush(self, ctx):
        """Emit any buffered header/hunk for ctx's rev.

        Returns 1 if a hunk was written, 0 otherwise."""
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the accumulated footer, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Render ctx, buffering the output when self.buffered is set."""
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified/added/removed file lists, one labeled line each
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        """Write diffstat and/or diff for ctx, per self.diffopts."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1376 1376
class jsonchangeset(changeset_printer):
    '''format changeset information.'''
    # Renders the whole log as one JSON array, one object per changeset.
    # The array brackets and separating commas are emitted incrementally
    # across _show()/close() calls, so the string fragments below must stay
    # byte-exact.

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        self._first = True  # tracks whether the opening '[' is still pending

    def close(self):
        # terminate the JSON array ('[]' if nothing was ever shown)
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory context has rev None; emit JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status against p1: (modified, added, removed)
            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture the diffstat output and embed it as a JSON string
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1475 1475
class changeset_templater(changeset_printer):
    '''format changeset information.'''
    # Template-driven variant of changeset_printer; exactly one of tmpl
    # (inline template string) or mapfile (style map file) is used.

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full node hashes in debug mode, 12-char short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        filters = {'formatnode': formatnode}
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode: later entries override
        # earlier ones, so e.g. 'changeset_debug' beats plain 'changeset'
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer before the base class flushes it
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1560 1560
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (template, mapfile) pair, at most one of which is set.
    """
    if not tmpl and not style:
        # Neither given explicitly: fall back to the [ui] section, where a
        # configured logtemplate is stronger than a configured style.
        cfgtmpl = ui.config('ui', 'logtemplate')
        if cfgtmpl:
            return templater.unquotestring(cfgtmpl), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if tmpl:
        return formatter.lookuptemplate(ui, 'changeset', tmpl)

    if not style:
        return None, None

    # A bare style name (no directory part) refers to a shipped
    # map-cmdline.<style> file; resolve it to a real path when possible.
    mapfile = style
    if not os.path.split(mapfile)[0]:
        mapname = (templater.templatepath('map-cmdline.' + mapfile)
                   or templater.templatepath(mapfile))
        if mapname:
            mapfile = mapname
    return None, mapfile
1587 1587
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when a diff or diffstat will be rendered
    wantsdiff = opts.get('patch') or opts.get('stat')
    matchfn = scmutil.matchall(repo) if wantsdiff else None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if tmpl or mapfile:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1613 1613
def showmarker(ui, marker, index=None):
    """Write a readable one-line rendering of an obsolescence marker.

    To be used by debug function."""
    if index is not None:
        ui.write("%i " % index)
    ui.write(hex(marker.precnode()))
    for succ in marker.succnodes():
        ui.write(' ' + hex(succ))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # metadata, sorted by key, with the date entry excluded
    items = sorted(marker.metadata().items())
    meta = ', '.join('%r: %r' % pair for pair in items if pair[0] != 'date')
    ui.write('{%s}' % meta)
    ui.write('\n')
1633 1633
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    hits = {}  # rev -> date tuple of matching changesets

    def prep(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            hits[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in hits:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(hits[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))
1654 1654
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield revision-window sizes that double up to a cap.

    Starts at ``windowsize`` and doubles after each yield while the
    current size is below ``sizelimit``; once the doubling stops, the
    final size is yielded forever.
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1660 1660
class FileWalkError(Exception):
    """Raised by walkfilerevs() when the file history cannot be walked
    using filelogs alone, telling the caller to fall back to the slow
    changelog-scanning path."""
    pass
1663 1663
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    Side effect: fncache is populated with rev -> [filenames] entries for
    the revisions found.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yields (filename, filenode-or-None); copy sources discovered
        # while walking are appended to 'copies' and processed afterwards
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1760 1760
class _followfilter(object):
    """Stateful predicate selecting revisions related to the first rev
    passed to match().

    The first call to match() records its rev as the start point; later
    calls return True for revisions connected to it (descendants when
    walking forward, ancestors when walking backward). With onlyfirst,
    only first parents are followed. Calls are expected in a consistent
    walk order since the root set is built incrementally.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents to follow, excluding nullrev
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first call: remember the starting revision
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1798 1798
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # slowpath: the file filter cannot be answered from filelogs alone
    # (patterns, or exact/prefix matches combined with --removed).
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    # bind the changectx factory once; it is invoked per candidate rev
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    # Python 2 filter() returns a list here, so the
                    # result can be stored in fncache directly.
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # forward pass: let 'prepare' see the window in ascending order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # then yield in the caller-requested (usually reverse) order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1936 1936
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # one-element list used as a mutable flag (no 'nonlocal' in Python 2)
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # Walk each file plus its ancestor file revisions and record,
        # per linkrev, the set of file names involved.
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1963 1963
1964 1964 def _makenofollowlogfilematcher(repo, pats, opts):
1965 1965 '''hook for extensions to override the filematcher for non-follow cases'''
1966 1966 return None
1967 1967
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Map each supported option to (revset template, joiner used when the
    # option value is a list).  Keys starting with '_' are synthetic
    # options injected below, not user-visible flags.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # shallow copy: we add synthetic '_*' keys without touching the caller's
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Finally translate the accumulated options into one revset string.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2119 2119
def _logrevs(repo, opts):
    '''return the initial candidate revisions for a log-style command'''
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    following = opts.get('follow') or opts.get('follow_first')
    userrevs = opts.get('rev')
    if userrevs:
        return scmutil.revrange(repo, userrevs)
    if following:
        if repo.dirstate.p1() == nullid:
            # nothing checked out, so nothing to follow
            return revset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = revset.spanset(repo)
    allrevs.reverse()
    return allrevs
2134 2134
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # apply --limit by taking the first 'limit' entries
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2172 2172
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        fixopts = ['branch', 'only_branch', 'keyword', 'user']
        oldrevs = revs
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
        elif len(pats) > 1 or any(len(opts.get(op, [])) > 1 for op in fixopts):
            # XXX "A or B" is known to change the order; fix it by filtering
            # matched set again (issue5100)
            revs = oldrevs & revs
    if limit is not None:
        # apply --limit by taking the first 'limit' entries
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2213 2213
def _graphnodeformatter(ui, displayer):
    """Return a callable (repo, ctx) -> str producing the graph node glyph.

    With no ui.graphnodetemplate config, use the stock {graphnode}
    keyword; otherwise render the user-supplied template per changeset.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode  # fast path for "{graphnode}"

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache  # reuse cache of slow templates
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        # NOTE: 'props' is shared across calls; per-rev keys are
        # overwritten on each invocation.
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templater.stringify(templ('graphnode', **props))
    return formatnode
2233 2233
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the DAG walk 'dag' as an ASCII graph through 'displayer'.

    getrenamed, if given, is used to annotate copies; filematcher, if
    given, restricts which files each revision's output details.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

        # experimental config: experimental.graphshorten
        state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the working directory / rev 0 guard
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # displayer buffered the output in 'hunk'; pop and split it so the
        # graph edges can be drawn alongside each text line
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2281 2281
def graphlog(ui, repo, *pats, **opts):
    """Run a log-style command and render the result as an ASCII graph."""
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            # bound the rename walk by the highest requested rev
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2296 2296
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option that -G/--graph cannot honor was supplied."""
    unsupported = ["newest_first"]
    for flagname in unsupported:
        if opts.get(flagname):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % flagname.replace("_", "-"))
2302 2302
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place, clamp to --limit, and build graph nodes."""
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2309 2309
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Add files matched by 'match' (recursing into subrepos).

    Returns the list of files that could not be added ('bad').
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record bad files while still delegating to the matcher's handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get('subrepos'):
                # --subrepos: add everything the matcher covers
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                # otherwise only explicitly listed files inside the subrepo
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2352 2352
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by 'match' (recursing into subrepos).

    Returns (bad, forgot): files that could not be forgotten, and files
    that were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    # status indices: 0=modified, 1=added, 3=deleted, 6=clean
    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2400 2400
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of 'ctx' matched by 'm' through formatter 'fm'.

    Returns 0 if at least one file was listed, 1 otherwise.
    """
    # NOTE: this chunk carried unresolved diff residue — both the old
    # local matchessubrepo() closure and the new m.matchessubrepo()
    # call; resolved here to the matcher-method form.
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working directory, skip files marked for removal
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        if subrepos or m.matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                # recurse fully only for exact matches or --subrepos
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2434 2430
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Remove files matched by 'm' (recursing into subrepos).

    after: record deletions already made in the working directory.
    force: remove even modified/added files.
    Returns 1 if any file could not be removed, else 0.  When 'warnings'
    is supplied by a recursive caller, messages are appended to it
    instead of being printed here.
    """
    # NOTE: this chunk carried unresolved diff residue — both the old
    # local matchessubrepo() closure and the new m.matchessubrepo()
    # call; resolved here to the matcher-method form.  The builtin-
    # shadowing local 'list' is renamed to 'removelist'.
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    # status indices: 0=modified, 1=added, 3=deleted, 6=clean
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # top-level call owns the warning list and prints it at the end
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        if subrepos or m.matchessubrepo(subpath):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))

            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        removelist = modified + deleted + clean + added
    elif after:
        removelist = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        removelist = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file has been marked for add'
                              ' (use forget to undo)\n') % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    removelist = sorted(removelist)
    total = len(removelist)
    count = 0
    for f in removelist:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in removelist:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(removelist)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2561 2549
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the contents of files in 'ctx' matched by 'matcher'.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            # a subrepo returning a falsy value means it wrote something
            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2601 2589
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    wantaddremove = opts.get('addremove')
    if wantaddremove and scmutil.addremove(repo, matcher, "", opts) != 0:
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2618 2606
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in pending working-dir changes.

    A temporary commit of the working directory is made first (if there is
    anything to commit), then a replacement changeset is built on top of
    old.p1() carrying the combined changes, and the original (plus the
    temporary commit) is either obsoleted or stripped depending on whether
    obsolescence markers are enabled.

    Returns the node of the amended changeset, or ``old.node()`` when
    nothing changed.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            # |          from working dir to go into amending commit
            # |          (or a workingctx if there were no changes)
            # |
            # old      o - changeset to amend
            # |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # BUG FIX: the previous condition was "if old.p2:", but p2 is
                # a bound method and therefore always truthy, so second-parent
                # copy data was consulted even for non-merge changesets. Only
                # do so when a real second parent exists.
                if old.p2().node() != nullid:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    # True when f is identical (content and flags) in ctx and
                    # base, or absent from both.
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    # Serve file content from the intermediate commit.
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                # always restore the caller's phases.new-commit setting
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2813 2801
def commiteditor(repo, ctx, subs, editform=''):
    # Reuse an existing description verbatim; only fall back to an
    # interactive editor session when the changeset has none.
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2819 2807
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Run the user's editor to obtain a commit message for ctx.

    The initial editor text comes from a [committemplate] template when one
    matches ``editform`` (most-specific form first), otherwise from
    buildcommittext(). ``finishdesc``, if given, post-processes the edited
    text. Raises error.Abort on an empty message, or — when
    ``unchangedmessagedetection`` is set — when the user saved the template
    text unmodified.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Try 'changeset.form.subform', then 'changeset.form', ... down to the
    # bare 'changeset' key; the while/else runs only if no template matched.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending)
    # strip all HG:-prefixed helper lines from the edited result
    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2860 2848
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit-message template 'tmpl' for 'ctx' and return it."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # Feed every auxiliary [committemplate] item into the template cache;
    # 'changeset' itself is the entry point and is not overridden here.
    for key, value in repo.ui.configitems('committemplate'):
        if key == 'changeset':
            continue
        t.t.cache[key] = value

    if not extramsg:
        extramsg = ''  # ensure that extramsg is a string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2877 2865
def hgprefix(msg):
    """Prefix every non-empty line of 'msg' with 'HG: ', dropping blanks."""
    kept = [line for line in msg.split("\n") if line]
    return "\n".join("HG: %s" % line for line in kept)
2880 2868
def buildcommittext(repo, ctx, subs, extramsg):
    """Assemble the default (non-templated) commit editor text for 'ctx'."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("")  # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_("added %s") % f) for f in added)
    lines.extend(hgprefix(_("changed %s") % f) for f in modified)
    lines.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not added and not modified and not removed:
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2908 2896
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print user feedback after committing 'node'.

    Emits 'created new head' when the commit made a new branch head,
    'reopening closed branch head' when a closed head is being reopened,
    and (in verbose/debug mode) the committed changeset id. 'bheads' are
    the branch heads as they were before the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2956 2944
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status for files matched by 'pats'."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2959 2947
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matched by 'pats' to their state in changeset 'ctx'.

    'parents' is the (p1, p2) pair of working-directory parents. Files are
    classified by comparing working copy, dirstate and the target revision,
    then dispatched to revert/add/remove/forget/undelete actions, which
    _performrevert() executes (unless --dry-run). Matching subrepos are
    reverted recursively at the end.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress "not found" warnings for paths already collected,
                # for subrepos, and for directories covered by known files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = dsmodified - smf
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2 # unconditionally do backup
        check = 1 # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3238 3226
3239 3227 def _revertprefetch(repo, ctx, *files):
3240 3228 """Let extension changing the storage layer prefetch content"""
3241 3229 pass
3242 3230
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    'actions' maps action names ('revert', 'add', 'remove', 'drop',
    'forget', 'undelete', ...) to ([files], message) pairs as built by
    revert(). In interactive mode the 'revert' action goes through hunk
    selection; 'tobackup' lists files to copy to .orig before patching.
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write f's content (and flags) from the target revision into the wdir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                # declined: keep the file tracked and exclude it from the
                # interactive patch below
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # best-effort: the file may already be gone
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)
        operation = 'discard' if node == parent else 'revert'

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # re-record copy information for files the revert (re)created
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3369 3357
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # repository-requirement flags are attached to the function itself
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            # the table entry is a 2-tuple, extended with the synopsis when
            # one was supplied
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator

    return cmd
3414 3402
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#   (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#   - (sourceurl, sourcebranch, sourcepeer, incoming)
#   - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a (state file, clearable, allowcommit, error, hint) tuple,
# consumed by checkunfinished() and clearunfinished() below.
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3443 3431
def checkunfinished(repo, commit=False):
    '''Abort if a multistep operation (e.g. graft) was left unfinished.

    It's probably good to call this right before bailifchanged(). With
    commit=True, states whose entry allows committing are tolerated.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3454 3442
def clearunfinished(repo):
    """Check for unfinished operations (as in checkunfinished), aborting
    on unclearable ones, and remove the state files of clearable ones."""
    # first pass: any unclearable state present means we must not proceed
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: drop every clearable state file that is present
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3465 3453
# list of (state file, command) pairs: when the .hg/{state file} exists,
# the given command is the one that continues the interrupted operation
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3470 3458
def howtocontinue(repo):
    """Return how to finish the current unfinished operation.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple: 'msg' is a string (or None when there
    is nothing to continue) and 'warning' is a boolean saying whether the
    message should be presented as a warning.
    """
    contmsg = _("continue: %s")
    for statefile, cmd in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return contmsg % cmd, True
    wctx = repo[None]
    if any(repo.status()) or any(wctx.sub(s).dirty() for s in wctx.substate):
        # dirty working directory (or dirty subrepo): suggest committing
        return contmsg % _("hg commit"), False
    return None, None
3491 3479
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve.

    If howtocontinue() flagged the message as a warning it goes through
    repo.ui.warn, otherwise through repo.ui.note.
    """
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3506 3494
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task; when the only suggestion
    would be a plain 'hg commit' (warning flag unset), no hint is attached.
    """
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
3521 3509
class dirstateguard(object):
    '''Restore dirstate at unexpected failure.

    At the construction, this class does:

    - write current ``repo.dirstate`` out, and
    - save ``.hg/dirstate`` into the backup file

    This restores ``.hg/dirstate`` from backup file, if ``release()``
    is invoked before ``close()``.

    This just removes the backup file at ``close()`` before ``release()``.
    '''

    def __init__(self, repo, name):
        self._repo = repo
        # id(self) keeps the backup filename unique per guard instance
        self._suffix = '.backup.%s.%d' % (name, id(self))
        repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
        self._active = True
        self._closed = False

    def __del__(self):
        if self._active: # still active
            # this may occur, even if this class is used correctly:
            # for example, releasing other resources like transaction
            # may raise exception before ``dirstateguard.release`` in
            # ``release(tr, ....)``.
            self._abort()

    def close(self):
        # success path: the backup is no longer needed and is discarded
        if not self._active: # already inactivated
            msg = (_("can't close already inactivated backup: dirstate%s")
                   % self._suffix)
            raise error.Abort(msg)

        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                         self._suffix)
        self._active = False
        self._closed = True

    def _abort(self):
        # failure path: put ``.hg/dirstate`` back from the backup file
        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                           self._suffix)
        self._active = False

    def release(self):
        # restore the backup unless close() already marked success
        if not self._closed:
            if not self._active: # already inactivated
                msg = (_("can't release already inactivated backup:"
                         " dirstate%s")
                       % self._suffix)
                raise error.Abort(msg)
            self._abort()
@@ -1,712 +1,716 b''
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import os
12 12 import re
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 error,
17 17 pathutil,
18 18 util,
19 19 )
20 20
21 21 propertycache = util.propertycache
22 22
def _rematcher(regex):
    '''compile the regexp with the best available regexp engine and return a
    matcher function'''
    compiled = util.re.compile(regex)
    # facebook's re2 bindings expose test_match, which is slightly faster
    testmatch = getattr(compiled, 'test_match', None)
    if testmatch is not None:
        return testmatch
    return compiled.match
32 32
33 33 def _expandsets(kindpats, ctx, listsubrepos):
34 34 '''Returns the kindpats list with the 'set' patterns expanded.'''
35 35 fset = set()
36 36 other = []
37 37
38 38 for kind, pat, source in kindpats:
39 39 if kind == 'set':
40 40 if not ctx:
41 41 raise error.Abort(_("fileset expression with no context"))
42 42 s = ctx.getfileset(pat)
43 43 fset.update(s)
44 44
45 45 if listsubrepos:
46 46 for subpath in ctx.substate:
47 47 s = ctx.sub(subpath).getfileset(pat)
48 48 fset.update(subpath + '/' + f for f in s)
49 49
50 50 continue
51 51 other.append((kind, pat, source))
52 52 return fset, other
53 53
def _expandsubinclude(kindpats, root):
    '''Return (relmatchers, other): a matcher per 'subinclude' pattern and
    the kindpats with those entries removed.'''
    relmatchers = []
    other = []

    for kind, pat, source in kindpats:
        if kind != 'subinclude':
            other.append((kind, pat, source))
            continue
        # the pattern file lives next to the file that referenced it
        sourceroot = pathutil.dirname(util.normpath(source))
        path = pathutil.join(sourceroot, util.pconvert(pat))
        newroot = pathutil.dirname(path)
        relmatcher = match(newroot, '', [], ['include:%s' % path])
        # prefix of repo-relative paths this submatcher applies to
        prefix = pathutil.canonpath(root, root, newroot)
        if prefix:
            prefix += '/'
        relmatchers.append((prefix, relmatcher))

    return relmatchers, other
77 77
78 78 def _kindpatsalwaysmatch(kindpats):
79 79 """"Checks whether the kindspats match everything, as e.g.
80 80 'relpath:.' does.
81 81 """
82 82 for kind, pat, source in kindpats:
83 83 if pat != '' or kind not in ['relpath', 'glob']:
84 84 return False
85 85 return True
86 86
class match(object):
    def __init__(self, root, cwd, patterns, include=[], exclude=[],
                 default='glob', exact=False, auditor=None, ctx=None,
                 listsubrepos=False, warn=None, badfn=None):
        """build an object to match a set of file patterns

        arguments:
        root - the canonical root of the tree you're matching against
        cwd - the current working directory, if relevant
        patterns - patterns to find
        include - patterns to include (unless they are excluded)
        exclude - patterns to exclude (even if they are included)
        default - if a pattern in patterns has no explicit type, assume this one
        exact - patterns are actually filenames (include/exclude still apply)
        warn - optional function used for printing warnings
        badfn - optional bad() callback for this matcher instead of the default

        a pattern is one of:
        'glob:<glob>' - a glob relative to cwd
        're:<regexp>' - a regular expression
        'path:<path>' - a path relative to repository root
        'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
        'relpath:<path>' - a path relative to cwd
        'relre:<regexp>' - a regexp that needn't match the start of a name
        'set:<fileset>' - a fileset expression
        'include:<path>' - a file of patterns to read and include
        'subinclude:<path>' - a file of patterns to match against files under
                              the same directory
        '<something>' - a pattern of the specified default type
        """

        self._root = root
        self._cwd = cwd
        self._files = [] # exact files and roots of patterns
        self._anypats = bool(include or exclude)
        self._always = False
        self._pathrestricted = bool(include or exclude or patterns)
        self._warn = warn
        self._includeroots = set()
        self._includedirs = set(['.'])
        self._excluderoots = set()

        if badfn is not None:
            self.bad = badfn

        # matchfns collects one predicate per active criterion; the final
        # matchfn is the conjunction of all of them
        matchfns = []
        if include:
            kindpats = self._normalize(include, 'glob', root, cwd, auditor)
            self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
                                              listsubrepos, root)
            self._includeroots.update(_roots(kindpats))
            self._includedirs.update(util.dirs(self._includeroots))
            matchfns.append(im)
        if exclude:
            kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
            self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
                                              listsubrepos, root)
            # only pure-path excludes can be used to prune directory walks
            if not _anypats(kindpats):
                self._excluderoots.update(_roots(kindpats))
            matchfns.append(lambda f: not em(f))
        if exact:
            if isinstance(patterns, list):
                self._files = patterns
            else:
                self._files = list(patterns)
            matchfns.append(self.exact)
        elif patterns:
            kindpats = self._normalize(patterns, default, root, cwd, auditor)
            if not _kindpatsalwaysmatch(kindpats):
                self._files = _roots(kindpats)
                self._anypats = self._anypats or _anypats(kindpats)
                self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
                                                   listsubrepos, root)
                matchfns.append(pm)

        if not matchfns:
            # no criteria at all: match everything
            m = util.always
            self._always = True
        elif len(matchfns) == 1:
            m = matchfns[0]
        else:
            def m(f):
                for matchfn in matchfns:
                    if not matchfn(f):
                        return False
                return True

        self.matchfn = m
        self._fileroots = set(self._files)

    def __call__(self, fn):
        # a matcher is callable; equivalent to self.matchfn(fn)
        return self.matchfn(fn)
    def __iter__(self):
        # iterating a matcher yields its explicitly listed files/roots
        for f in self._files:
            yield f

    # Callbacks related to how the matcher is used by dirstate.walk.
    # Subscribers to these events must monkeypatch the matcher object.
    def bad(self, f, msg):
        '''Callback from dirstate.walk for each explicit file that can't be
        found/accessed, with an error message.'''
        pass

    # If an explicitdir is set, it will be called when an explicitly listed
    # directory is visited.
    explicitdir = None

    # If an traversedir is set, it will be called when a directory discovered
    # by recursive traversal is visited.
    traversedir = None

    def abs(self, f):
        '''Convert a repo path back to path that is relative to the root of the
        matcher.'''
        return f

    def rel(self, f):
        '''Convert repo path back to path that is relative to cwd of matcher.'''
        return util.pathto(self._root, self._cwd, f)

    def uipath(self, f):
        '''Convert repo path to a display path. If patterns or -I/-X were used
        to create this matcher, the display path will be relative to cwd.
        Otherwise it is relative to the root of the repo.'''
        return (self._pathrestricted and self.rel(f)) or self.abs(f)

    def files(self):
        '''Explicitly listed files or patterns or roots:
        if no patterns or .always(): empty list,
        if exact: list exact files,
        if not .anypats(): list all files and dirs,
        else: optimal roots'''
        return self._files

    @propertycache
    def _dirs(self):
        # all parent directories of the file roots, plus '.'
        return set(util.dirs(self._fileroots)) | set(['.'])

    def visitdir(self, dir):
        '''Decides whether a directory should be visited based on whether it
        has potential matches in it or one of its subdirectories. This is
        based on the match's primary, included, and excluded patterns.

        Returns the string 'all' if the given directory and all subdirectories
        should be visited. Otherwise returns True or False indicating whether
        the given directory should be visited.

        This function's behavior is undefined if it has returned False for
        one of the dir's parent directories.
        '''
        if self.prefix() and dir in self._fileroots:
            return 'all'
        if dir in self._excluderoots:
            return False
        if (self._includeroots and
            '.' not in self._includeroots and
            dir not in self._includeroots and
            dir not in self._includedirs and
            not any(parent in self._includeroots
                    for parent in util.finddirs(dir))):
            return False
        return (not self._fileroots or
                '.' in self._fileroots or
                dir in self._fileroots or
                dir in self._dirs or
                any(parentdir in self._fileroots
                    for parentdir in util.finddirs(dir)))

    def exact(self, f):
        '''Returns True if f is in .files().'''
        return f in self._fileroots

    def anypats(self):
        '''Matcher uses patterns or include/exclude.'''
        return self._anypats

    def always(self):
        '''Matcher will match everything and .files() will be empty
        - optimization might be possible and necessary.'''
        return self._always

    def ispartial(self):
        '''True if the matcher won't always match.

        Although it's just the inverse of _always in this implementation,
        an extension such as narrowhg might make it return something
        slightly different.'''
        return not self._always

    def isexact(self):
        # True when the matcher was built with exact=True (matchfn is exact)
        return self.matchfn == self.exact

    def prefix(self):
        # True when the matcher is a plain list of path prefixes
        return not self.always() and not self.isexact() and not self.anypats()

    def _normalize(self, patterns, default, root, cwd, auditor):
        '''Convert 'kind:pat' from the patterns list to tuples with kind and
        normalized and rooted patterns and with listfiles expanded.'''
        kindpats = []
        for kind, pat in [_patsplit(p, default) for p in patterns]:
            if kind in ('glob', 'relpath'):
                pat = pathutil.canonpath(root, cwd, pat, auditor)
            elif kind in ('relglob', 'path'):
                pat = util.normpath(pat)
            elif kind in ('listfile', 'listfile0'):
                try:
                    files = util.readfile(pat)
                    if kind == 'listfile0':
                        files = files.split('\0')
                    else:
                        files = files.splitlines()
                    files = [f for f in files if f]
                except EnvironmentError:
                    raise error.Abort(_("unable to read file list (%s)") % pat)
                # recurse: each listed entry is itself a pattern
                for k, p, source in self._normalize(files, default, root, cwd,
                                                    auditor):
                    kindpats.append((k, p, pat))
                continue
            elif kind == 'include':
                try:
                    fullpath = os.path.join(root, util.localpath(pat))
                    includepats = readpatternfile(fullpath, self._warn)
                    for k, p, source in self._normalize(includepats, default,
                                                        root, cwd, auditor):
                        kindpats.append((k, p, source or pat))
                except error.Abort as inst:
                    raise error.Abort('%s: %s' % (pat, inst[0]))
                except IOError as inst:
                    if self._warn:
                        self._warn(_("skipping unreadable pattern file "
                                     "'%s': %s\n") % (pat, inst.strerror))
                    continue
            # else: re or relre - which cannot be normalized
            kindpats.append((kind, pat, ''))
        return kindpats

    def matchessubrepo(self, subpath):
        '''Returns whether the subrepo at subpath is matched: either listed
        exactly or because some explicitly listed file lives under it.'''
        return (self.exact(subpath)
                or any(f.startswith(subpath + '/') for f in self.files()))
326
def exact(root, cwd, files, badfn=None):
    """Build a matcher treating *files* as exact filenames."""
    return match(root, cwd, files, badfn=badfn, exact=True)
325 329
def always(root, cwd):
    """Build a matcher that matches everything (no patterns at all)."""
    return match(root, cwd, patterns=[])
328 332
def badmatch(match, badfn):
    """Return a shallow copy of *match* whose bad() callback is *badfn*.

    The original matcher is left untouched.
    """
    newmatch = copy.copy(match)
    newmatch.bad = badfn
    return newmatch
336 340
class subdirmatcher(match):
    """Adapt a matcher to work on a subdirectory only.

    The paths are remapped to remove/insert the path as needed:

    >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
    >>> m2 = subdirmatcher('sub', m1)
    >>> bool(m2('a.txt'))
    False
    >>> bool(m2('b.txt'))
    True
    >>> bool(m2.matchfn('a.txt'))
    False
    >>> bool(m2.matchfn('b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact('b.txt')
    True
    >>> util.pconvert(m2.rel('b.txt'))
    'sub/b.txt'
    >>> def bad(f, msg):
    ...     print "%s: %s" % (f, msg)
    >>> m1.bad = bad
    >>> m2.bad('x.txt', 'No such file')
    sub/x.txt: No such file
    >>> m2.abs('c.txt')
    'sub/c.txt'
    """

    def __init__(self, path, matcher):
        # deliberately does NOT call match.__init__: state is copied from
        # the wrapped matcher instead of rebuilt from patterns
        self._root = matcher._root
        self._cwd = matcher._cwd
        self._path = path
        self._matcher = matcher
        self._always = matcher._always
        self._pathrestricted = matcher._pathrestricted

        # keep only files under 'path', stripped of the 'path/' prefix
        self._files = [f[len(path) + 1:] for f in matcher._files
                       if f.startswith(path + "/")]

        # If the parent repo had a path to this subrepo and no patterns are
        # specified, this submatcher always matches.
        if not self._always and not matcher._anypats:
            self._always = any(f == path for f in matcher._files)

        self._anypats = matcher._anypats
        # Some information is lost in the superclass's constructor, so we
        # can not accurately create the matching function for the subdirectory
        # from the inputs. Instead, we override matchfn() and visitdir() to
        # call the original matcher with the subdirectory path prepended.
        self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
        def visitdir(dir):
            if dir == '.':
                return matcher.visitdir(self._path)
            return matcher.visitdir(self._path + "/" + dir)
        self.visitdir = visitdir
        self._fileroots = set(self._files)

    def abs(self, f):
        # delegate with the subdirectory prefix restored
        return self._matcher.abs(self._path + "/" + f)

    def bad(self, f, msg):
        self._matcher.bad(self._path + "/" + f, msg)

    def rel(self, f):
        return self._matcher.rel(self._path + "/" + f)
404 408
class icasefsmatcher(match):
    """A matcher for wdir on case insensitive filesystems, which normalizes the
    given patterns to the case in the filesystem.
    """

    def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
                 ctx, listsubrepos=False, badfn=None):
        init = super(icasefsmatcher, self).__init__
        self._dirstate = ctx.repo().dirstate
        self._dsnormalize = self._dirstate.normalize

        # note: superclass __init__ calls our _normalize(), which records
        # the pre-normalization kindpats in self._kp
        init(root, cwd, patterns, include, exclude, default, auditor=auditor,
             ctx=ctx, listsubrepos=listsubrepos, badfn=badfn)

        # m.exact(file) must be based off of the actual user input, otherwise
        # inexact case matches are treated as exact, and not noted without -v.
        if self._files:
            self._fileroots = set(_roots(self._kp))

    def _normalize(self, patterns, default, root, cwd, auditor):
        self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
                                                          root, cwd, auditor)
        kindpats = []
        for kind, pats, source in self._kp:
            if kind not in ('re', 'relre'):  # regex can't be normalized
                p = pats
                pats = self._dsnormalize(pats)

                # Preserve the original to handle a case only rename.
                if p != pats and p in self._dirstate:
                    kindpats.append((kind, p, source))

            kindpats.append((kind, pats, source))
        return kindpats
439 443
def patkind(pattern, default=None):
    '''If pattern is 'kind:pat' with a known kind, return kind.'''
    kind, pat = _patsplit(pattern, default)
    return kind
443 447
444 448 def _patsplit(pattern, default):
445 449 """Split a string into the optional pattern kind prefix and the actual
446 450 pattern."""
447 451 if ':' in pattern:
448 452 kind, pat = pattern.split(':', 1)
449 453 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
450 454 'listfile', 'listfile0', 'set', 'include', 'subinclude'):
451 455 return kind, pat
452 456 return default, pattern
453 457
def _globre(pat):
    r'''Convert an extended glob string to a regexp string.

    >>> print _globre(r'?')
    .
    >>> print _globre(r'*')
    [^/]*
    >>> print _globre(r'**')
    .*
    >>> print _globre(r'**/a')
    (?:.*/)?a
    >>> print _globre(r'a/**/b')
    a\/(?:.*/)?b
    >>> print _globre(r'[a*?!^][^b][!c]')
    [a*?!^][\^b][^c]
    >>> print _globre(r'{a,b}')
    (?:a|b)
    >>> print _globre(r'.\*\?')
    \.\*\?
    '''
    # hand-written scanner: i is the cursor into pat, res the regexp built
    # so far, group the current '{...}' alternation nesting depth
    i, n = 0, len(pat)
    res = ''
    group = 0
    escape = util.re.escape
    def peek():
        # next character, or False at end of pattern
        return i < n and pat[i]
    while i < n:
        c = pat[i]
        i += 1
        if c not in '*?[{},\\':
            res += escape(c)
        elif c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries; '**/' optionally so
                i += 1
                if peek() == '/':
                    i += 1
                    res += '(?:.*/)?'
                else:
                    res += '.*'
            else:
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # scan for the closing ']'; a leading '!' or ']' is literal/negation
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' literally
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation '!' becomes regexp negation '^'
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # a literal leading '^' must be escaped
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group += 1
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group -= 1
        elif c == ',' and group:
            res += '|'
        elif c == '\\':
            # backslash escapes the next character, if any
            p = peek()
            if p:
                i += 1
                res += escape(p)
            else:
                res += escape(c)
        else:
            # '}' or ',' outside a group: literal
            res += escape(c)
    return res
531 535
def _regex(kind, pat, globsuffix):
    '''Convert a (normalized) pattern of any kind into a regular expression.
    globsuffix is appended to the regexp of globs.'''
    if not pat:
        return ''
    if kind == 're':
        return pat
    if kind == 'path':
        # 'path:.' matches everything under the root
        return '' if pat == '.' else '^' + util.re.escape(pat) + '(?:/|$)'
    if kind == 'relglob':
        return '(?:|.*/)' + _globre(pat) + globsuffix
    if kind == 'relpath':
        return util.re.escape(pat) + '(?:/|$)'
    if kind == 'relre':
        return pat if pat.startswith('^') else '.*' + pat
    # 'glob' and any remaining kind: rooted glob
    return _globre(pat) + globsuffix
552 556
def _buildmatch(ctx, kindpats, globsuffix, listsubrepos, root):
    '''Return regexp string and a matcher function for kindpats.
    globsuffix is appended to the regexp of globs.'''
    matchfuncs = []

    subincludes, kindpats = _expandsubinclude(kindpats, root)
    if subincludes:
        def matchsubinclude(f):
            # delegate to the submatcher whose prefix covers f, if any
            return any(f.startswith(prefix) and mf(f[len(prefix):])
                       for prefix, mf in subincludes)
        matchfuncs.append(matchsubinclude)

    fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
    if fset:
        matchfuncs.append(fset.__contains__)

    regex = ''
    if kindpats:
        regex, mf = _buildregexmatch(kindpats, globsuffix)
        matchfuncs.append(mf)

    if len(matchfuncs) == 1:
        return regex, matchfuncs[0]
    # several criteria: a file matches when any of them accepts it
    return regex, lambda f: any(mf(f) for mf in matchfuncs)
580 584
def _buildregexmatch(kindpats, globsuffix):
    """Build a match function from a list of kinds and kindpats,
    return regexp string and a matcher function."""
    try:
        regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
                                     for (k, p, s) in kindpats])
        if len(regex) > 20000:
            # pre-empt engines that choke on huge patterns
            raise OverflowError
        return regex, _rematcher(regex)
    except OverflowError:
        # We're using a Python with a tiny regex engine and we
        # made it explode, so we'll divide the pattern list in two
        # until it works
        l = len(kindpats)
        if l < 2:
            raise
        regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
        regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
        # note: 'regex' here is the over-long combined pattern built above;
        # only the split matchers a/b are actually used for matching
        return regex, lambda s: a(s) or b(s)
    except re.error:
        # recompile each pattern alone to pinpoint the invalid one
        for k, p, s in kindpats:
            try:
                _rematcher('(?:%s)' % _regex(k, p, globsuffix))
            except re.error:
                if s:
                    raise error.Abort(_("%s: invalid pattern (%s): %s") %
                                      (s, k, p))
                else:
                    raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise error.Abort(_("invalid pattern"))
611 615
612 616 def _roots(kindpats):
613 617 '''return roots and exact explicitly listed files from patterns
614 618
615 619 >>> _roots([('glob', 'g/*', ''), ('glob', 'g', ''), ('glob', 'g*', '')])
616 620 ['g', 'g', '.']
617 621 >>> _roots([('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
618 622 ['r', 'p/p', '.']
619 623 >>> _roots([('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
620 624 ['.', '.', '.']
621 625 '''
622 626 r = []
623 627 for kind, pat, source in kindpats:
624 628 if kind == 'glob': # find the non-glob prefix
625 629 root = []
626 630 for p in pat.split('/'):
627 631 if '[' in p or '{' in p or '*' in p or '?' in p:
628 632 break
629 633 root.append(p)
630 634 r.append('/'.join(root) or '.')
631 635 elif kind in ('relpath', 'path'):
632 636 r.append(pat or '.')
633 637 else: # relglob, re, relre
634 638 r.append('.')
635 639 return r
636 640
637 641 def _anypats(kindpats):
638 642 for kind, pat, source in kindpats:
639 643 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
640 644 return True
641 645
_commentre = None

def readpatternfile(filepath, warn, sourceinfo=False):
    '''parse a pattern file, returning a list of
    patterns. These patterns should be given to compile()
    to be validated and converted into a match function.

    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.

    lines can be of the following formats:

    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob # defaults following lines to non-rooted globs
    re:pattern # non-rooted regular expression
    glob:pattern # non-rooted glob
    pattern # pattern of the current default type

    if sourceinfo is set, returns a list of tuples:
    (pattern, lineno, originalline). This is useful to debug ignore patterns.
    '''

    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
                'include': 'include', 'subinclude': 'subinclude'}
    syntax = 'relre:'
    patterns = []

    fp = open(filepath)
    # try/finally so the handle is closed even when a warn() callback or a
    # regex operation raises mid-parse (the previous version leaked it)
    try:
        for lineno, line in enumerate(fp, start=1):
            if "#" in line:
                global _commentre
                if not _commentre:
                    _commentre = util.re.compile(r'((?:^|[^\\])(?:\\\\)*)#.*')
                # remove comments prefixed by an even number of escapes
                m = _commentre.search(line)
                if m:
                    line = line[:m.end(1)]
                # fixup properly escaped comments that survived the above
                line = line.replace("\\#", "#")
            line = line.rstrip()
            if not line:
                continue

            if line.startswith('syntax:'):
                s = line[7:].strip()
                try:
                    syntax = syntaxes[s]
                except KeyError:
                    if warn:
                        warn(_("%s: ignoring invalid syntax '%s'\n") %
                             (filepath, s))
                continue

            # an explicit per-line 'kind:' prefix overrides the file default
            linesyntax = syntax
            for s, rels in syntaxes.iteritems():
                if line.startswith(rels):
                    linesyntax = rels
                    line = line[len(rels):]
                    break
                elif line.startswith(s + ':'):
                    linesyntax = rels
                    line = line[len(s) + 1:]
                    break
            if sourceinfo:
                patterns.append((linesyntax + line, lineno, line))
            else:
                patterns.append(linesyntax + line)
    finally:
        fp.close()
    return patterns
@@ -1,1429 +1,1421 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
43 43 class status(tuple):
44 44 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
45 45 and 'ignored' properties are only relevant to the working copy.
46 46 '''
47 47
48 48 __slots__ = ()
49 49
50 50 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
51 51 clean):
52 52 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
53 53 ignored, clean))
54 54
55 55 @property
56 56 def modified(self):
57 57 '''files that have been modified'''
58 58 return self[0]
59 59
60 60 @property
61 61 def added(self):
62 62 '''files that have been added'''
63 63 return self[1]
64 64
65 65 @property
66 66 def removed(self):
67 67 '''files that have been removed'''
68 68 return self[2]
69 69
70 70 @property
71 71 def deleted(self):
72 72 '''files that are in the dirstate, but have been deleted from the
73 73 working copy (aka "missing")
74 74 '''
75 75 return self[3]
76 76
77 77 @property
78 78 def unknown(self):
79 79 '''files not in the dirstate that are not ignored'''
80 80 return self[4]
81 81
82 82 @property
83 83 def ignored(self):
84 84 '''files not in the dirstate that are ignored (by _dirignore())'''
85 85 return self[5]
86 86
87 87 @property
88 88 def clean(self):
89 89 '''files that have not been modified'''
90 90 return self[6]
91 91
92 92 def __repr__(self, *args, **kwargs):
93 93 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
94 94 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
96 96 def itersubrepos(ctx1, ctx2):
97 97 """find subrepos in ctx1 or ctx2"""
98 98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103 103
104 104 missing = set()
105 105
106 106 for subpath in ctx2.substate:
107 107 if subpath not in ctx1.substate:
108 108 del subpaths[subpath]
109 109 missing.add(subpath)
110 110
111 111 for subpath, ctx in sorted(subpaths.iteritems()):
112 112 yield subpath, ctx.sub(subpath)
113 113
114 114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 115 # status and diff will have an accurate result when it does
116 116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 117 # against itself.
118 118 for subpath in missing:
119 119 yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
121 121 def nochangesfound(ui, repo, excluded=None):
122 122 '''Report no changes for push/pull, excluded is None or a list of
123 123 nodes excluded from the push/pull.
124 124 '''
125 125 secretlist = []
126 126 if excluded:
127 127 for n in excluded:
128 128 if n not in repo:
129 129 # discovery should not have included the filtered revision,
130 130 # we have to explicitly exclude it until discovery is cleanup.
131 131 continue
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def checknewlabel(repo, lbl, kind):
143 143 # Do not use the "kind" parameter in ui output.
144 144 # It makes strings difficult to translate.
145 145 if lbl in ['tip', '.', 'null']:
146 146 raise error.Abort(_("the name '%s' is reserved") % lbl)
147 147 for c in (':', '\0', '\n', '\r'):
148 148 if c in lbl:
149 149 raise error.Abort(_("%r cannot be used in a name") % c)
150 150 try:
151 151 int(lbl)
152 152 raise error.Abort(_("cannot use an integer as a name"))
153 153 except ValueError:
154 154 pass
155 155
156 156 def checkfilename(f):
157 157 '''Check that the filename f is an acceptable filename for a tracked file'''
158 158 if '\r' in f or '\n' in f:
159 159 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
161 161 def checkportable(ui, f):
162 162 '''Check if filename f is portable and warn or abort depending on config'''
163 163 checkfilename(f)
164 164 abort, warn = checkportabilityalert(ui)
165 165 if abort or warn:
166 166 msg = util.checkwinfilename(f)
167 167 if msg:
168 168 msg = "%s: %r" % (msg, f)
169 169 if abort:
170 170 raise error.Abort(msg)
171 171 ui.warn(_("warning: %s\n") % msg)
172 172
173 173 def checkportabilityalert(ui):
174 174 '''check if the user's config requests nothing, a warning, or abort for
175 175 non-portable filenames'''
176 176 val = ui.config('ui', 'portablefilenames', 'warn')
177 177 lval = val.lower()
178 178 bval = util.parsebool(val)
179 179 abort = os.name == 'nt' or lval == 'abort'
180 180 warn = bval or lval == 'warn'
181 181 if bval is None and not (warn or abort or lval == 'ignore'):
182 182 raise error.ConfigError(
183 183 _("ui.portablefilenames value is invalid ('%s')") % val)
184 184 return abort, warn
185 185
186 186 class casecollisionauditor(object):
187 187 def __init__(self, ui, abort, dirstate):
188 188 self._ui = ui
189 189 self._abort = abort
190 190 allfiles = '\0'.join(dirstate._map)
191 191 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
192 192 self._dirstate = dirstate
193 193 # The purpose of _newfiles is so that we don't complain about
194 194 # case collisions if someone were to call this object with the
195 195 # same filename twice.
196 196 self._newfiles = set()
197 197
198 198 def __call__(self, f):
199 199 if f in self._newfiles:
200 200 return
201 201 fl = encoding.lower(f)
202 202 if fl in self._loweredfiles and f not in self._dirstate:
203 203 msg = _('possible case-folding collision for %s') % f
204 204 if self._abort:
205 205 raise error.Abort(msg)
206 206 self._ui.warn(_("warning: %s\n") % msg)
207 207 self._loweredfiles.add(fl)
208 208 self._newfiles.add(f)
209 209
210 210 def filteredhash(repo, maxrev):
211 211 """build hash of filtered revisions in the current repoview.
212 212
213 213 Multiple caches perform up-to-date validation by checking that the
214 214 tiprev and tipnode stored in the cache file match the current repository.
215 215 However, this is not sufficient for validating repoviews because the set
216 216 of revisions in the view may change without the repository tiprev and
217 217 tipnode changing.
218 218
219 219 This function hashes all the revs filtered from the view and returns
220 220 that SHA-1 digest.
221 221 """
222 222 cl = repo.changelog
223 223 if not cl.filteredrevs:
224 224 return None
225 225 key = None
226 226 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
227 227 if revs:
228 228 s = hashlib.sha1()
229 229 for rev in revs:
230 230 s.update('%s;' % rev)
231 231 key = s.digest()
232 232 return key
233 233
234 234 class abstractvfs(object):
235 235 """Abstract base class; cannot be instantiated"""
236 236
237 237 def __init__(self, *args, **kwargs):
238 238 '''Prevent instantiation; don't call this from subclasses.'''
239 239 raise NotImplementedError('attempted instantiating ' + str(type(self)))
240 240
241 241 def tryread(self, path):
242 242 '''gracefully return an empty string for missing files'''
243 243 try:
244 244 return self.read(path)
245 245 except IOError as inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248 return ""
249 249
250 250 def tryreadlines(self, path, mode='rb'):
251 251 '''gracefully return an empty array for missing files'''
252 252 try:
253 253 return self.readlines(path, mode=mode)
254 254 except IOError as inst:
255 255 if inst.errno != errno.ENOENT:
256 256 raise
257 257 return []
258 258
259 259 @util.propertycache
260 260 def open(self):
261 261 '''Open ``path`` file, which is relative to vfs root.
262 262
263 263 Newly created directories are marked as "not to be indexed by
264 264 the content indexing service", if ``notindexed`` is specified
265 265 for "write" mode access.
266 266 '''
267 267 return self.__call__
268 268
269 269 def read(self, path):
270 270 with self(path, 'rb') as fp:
271 271 return fp.read()
272 272
273 273 def readlines(self, path, mode='rb'):
274 274 with self(path, mode=mode) as fp:
275 275 return fp.readlines()
276 276
277 277 def write(self, path, data, backgroundclose=False):
278 278 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
279 279 return fp.write(data)
280 280
281 281 def writelines(self, path, data, mode='wb', notindexed=False):
282 282 with self(path, mode=mode, notindexed=notindexed) as fp:
283 283 return fp.writelines(data)
284 284
285 285 def append(self, path, data):
286 286 with self(path, 'ab') as fp:
287 287 return fp.write(data)
288 288
289 289 def basename(self, path):
290 290 """return base element of a path (as os.path.basename would do)
291 291
292 292 This exists to allow handling of strange encoding if needed."""
293 293 return os.path.basename(path)
294 294
295 295 def chmod(self, path, mode):
296 296 return os.chmod(self.join(path), mode)
297 297
298 298 def dirname(self, path):
299 299 """return dirname element of a path (as os.path.dirname would do)
300 300
301 301 This exists to allow handling of strange encoding if needed."""
302 302 return os.path.dirname(path)
303 303
304 304 def exists(self, path=None):
305 305 return os.path.exists(self.join(path))
306 306
307 307 def fstat(self, fp):
308 308 return util.fstat(fp)
309 309
310 310 def isdir(self, path=None):
311 311 return os.path.isdir(self.join(path))
312 312
313 313 def isfile(self, path=None):
314 314 return os.path.isfile(self.join(path))
315 315
316 316 def islink(self, path=None):
317 317 return os.path.islink(self.join(path))
318 318
319 319 def isfileorlink(self, path=None):
320 320 '''return whether path is a regular file or a symlink
321 321
322 322 Unlike isfile, this doesn't follow symlinks.'''
323 323 try:
324 324 st = self.lstat(path)
325 325 except OSError:
326 326 return False
327 327 mode = st.st_mode
328 328 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
329 329
330 330 def reljoin(self, *paths):
331 331 """join various elements of a path together (as os.path.join would do)
332 332
333 333 The vfs base is not injected so that path stay relative. This exists
334 334 to allow handling of strange encoding if needed."""
335 335 return os.path.join(*paths)
336 336
337 337 def split(self, path):
338 338 """split top-most element of a path (as os.path.split would do)
339 339
340 340 This exists to allow handling of strange encoding if needed."""
341 341 return os.path.split(path)
342 342
343 343 def lexists(self, path=None):
344 344 return os.path.lexists(self.join(path))
345 345
346 346 def lstat(self, path=None):
347 347 return os.lstat(self.join(path))
348 348
349 349 def listdir(self, path=None):
350 350 return os.listdir(self.join(path))
351 351
352 352 def makedir(self, path=None, notindexed=True):
353 353 return util.makedir(self.join(path), notindexed)
354 354
355 355 def makedirs(self, path=None, mode=None):
356 356 return util.makedirs(self.join(path), mode)
357 357
358 358 def makelock(self, info, path):
359 359 return util.makelock(info, self.join(path))
360 360
361 361 def mkdir(self, path=None):
362 362 return os.mkdir(self.join(path))
363 363
364 364 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
365 365 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
366 366 dir=self.join(dir), text=text)
367 367 dname, fname = util.split(name)
368 368 if dir:
369 369 return fd, os.path.join(dir, fname)
370 370 else:
371 371 return fd, fname
372 372
373 373 def readdir(self, path=None, stat=None, skip=None):
374 374 return osutil.listdir(self.join(path), stat, skip)
375 375
376 376 def readlock(self, path):
377 377 return util.readlock(self.join(path))
378 378
379 379 def rename(self, src, dst, checkambig=False):
380 380 """Rename from src to dst
381 381
382 382 checkambig argument is used with util.filestat, and is useful
383 383 only if destination file is guarded by any lock
384 384 (e.g. repo.lock or repo.wlock).
385 385 """
386 386 dstpath = self.join(dst)
387 387 oldstat = checkambig and util.filestat(dstpath)
388 388 if oldstat and oldstat.stat:
389 389 ret = util.rename(self.join(src), dstpath)
390 390 newstat = util.filestat(dstpath)
391 391 if newstat.isambig(oldstat):
392 392 # stat of renamed file is ambiguous to original one
393 393 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
394 394 os.utime(dstpath, (advanced, advanced))
395 395 return ret
396 396 return util.rename(self.join(src), dstpath)
397 397
398 398 def readlink(self, path):
399 399 return os.readlink(self.join(path))
400 400
401 401 def removedirs(self, path=None):
402 402 """Remove a leaf directory and all empty intermediate ones
403 403 """
404 404 return util.removedirs(self.join(path))
405 405
406 406 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
407 407 """Remove a directory tree recursively
408 408
409 409 If ``forcibly``, this tries to remove READ-ONLY files, too.
410 410 """
411 411 if forcibly:
412 412 def onerror(function, path, excinfo):
413 413 if function is not os.remove:
414 414 raise
415 415 # read-only files cannot be unlinked under Windows
416 416 s = os.stat(path)
417 417 if (s.st_mode & stat.S_IWRITE) != 0:
418 418 raise
419 419 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
420 420 os.remove(path)
421 421 else:
422 422 onerror = None
423 423 return shutil.rmtree(self.join(path),
424 424 ignore_errors=ignore_errors, onerror=onerror)
425 425
426 426 def setflags(self, path, l, x):
427 427 return util.setflags(self.join(path), l, x)
428 428
429 429 def stat(self, path=None):
430 430 return os.stat(self.join(path))
431 431
432 432 def unlink(self, path=None):
433 433 return util.unlink(self.join(path))
434 434
435 435 def unlinkpath(self, path=None, ignoremissing=False):
436 436 return util.unlinkpath(self.join(path), ignoremissing)
437 437
438 438 def utime(self, path=None, t=None):
439 439 return os.utime(self.join(path), t)
440 440
441 441 def walk(self, path=None, onerror=None):
442 442 """Yield (dirpath, dirs, files) tuple for each directories under path
443 443
444 444 ``dirpath`` is relative one from the root of this vfs. This
445 445 uses ``os.sep`` as path separator, even you specify POSIX
446 446 style ``path``.
447 447
448 448 "The root of this vfs" is represented as empty ``dirpath``.
449 449 """
450 450 root = os.path.normpath(self.join(None))
451 451 # when dirpath == root, dirpath[prefixlen:] becomes empty
452 452 # because len(dirpath) < prefixlen.
453 453 prefixlen = len(pathutil.normasprefix(root))
454 454 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
455 455 yield (dirpath[prefixlen:], dirs, files)
456 456
457 457 @contextlib.contextmanager
458 458 def backgroundclosing(self, ui, expectedcount=-1):
459 459 """Allow files to be closed asynchronously.
460 460
461 461 When this context manager is active, ``backgroundclose`` can be passed
462 462 to ``__call__``/``open`` to result in the file possibly being closed
463 463 asynchronously, on a background thread.
464 464 """
465 465 # This is an arbitrary restriction and could be changed if we ever
466 466 # have a use case.
467 467 vfs = getattr(self, 'vfs', self)
468 468 if getattr(vfs, '_backgroundfilecloser', None):
469 469 raise error.Abort(
470 470 _('can only have 1 active background file closer'))
471 471
472 472 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
473 473 try:
474 474 vfs._backgroundfilecloser = bfc
475 475 yield bfc
476 476 finally:
477 477 vfs._backgroundfilecloser = None
478 478
479 479 class vfs(abstractvfs):
480 480 '''Operate files relative to a base directory
481 481
482 482 This class is used to hide the details of COW semantics and
483 483 remote file access from higher level code.
484 484 '''
485 485 def __init__(self, base, audit=True, expandpath=False, realpath=False):
486 486 if expandpath:
487 487 base = util.expandpath(base)
488 488 if realpath:
489 489 base = os.path.realpath(base)
490 490 self.base = base
491 491 self.mustaudit = audit
492 492 self.createmode = None
493 493 self._trustnlink = None
494 494
495 495 @property
496 496 def mustaudit(self):
497 497 return self._audit
498 498
499 499 @mustaudit.setter
500 500 def mustaudit(self, onoff):
501 501 self._audit = onoff
502 502 if onoff:
503 503 self.audit = pathutil.pathauditor(self.base)
504 504 else:
505 505 self.audit = util.always
506 506
507 507 @util.propertycache
508 508 def _cansymlink(self):
509 509 return util.checklink(self.base)
510 510
511 511 @util.propertycache
512 512 def _chmod(self):
513 513 return util.checkexec(self.base)
514 514
515 515 def _fixfilemode(self, name):
516 516 if self.createmode is None or not self._chmod:
517 517 return
518 518 os.chmod(name, self.createmode & 0o666)
519 519
520 520 def __call__(self, path, mode="r", text=False, atomictemp=False,
521 521 notindexed=False, backgroundclose=False, checkambig=False):
522 522 '''Open ``path`` file, which is relative to vfs root.
523 523
524 524 Newly created directories are marked as "not to be indexed by
525 525 the content indexing service", if ``notindexed`` is specified
526 526 for "write" mode access.
527 527
528 528 If ``backgroundclose`` is passed, the file may be closed asynchronously.
529 529 It can only be used if the ``self.backgroundclosing()`` context manager
530 530 is active. This should only be specified if the following criteria hold:
531 531
532 532 1. There is a potential for writing thousands of files. Unless you
533 533 are writing thousands of files, the performance benefits of
534 534 asynchronously closing files is not realized.
535 535 2. Files are opened exactly once for the ``backgroundclosing``
536 536 active duration and are therefore free of race conditions between
537 537 closing a file on a background thread and reopening it. (If the
538 538 file were opened multiple times, there could be unflushed data
539 539 because the original file handle hasn't been flushed/closed yet.)
540 540
541 541 ``checkambig`` argument is passed to atomictemplfile (valid
542 542 only for writing), and is useful only if target file is
543 543 guarded by any lock (e.g. repo.lock or repo.wlock).
544 544 '''
545 545 if self._audit:
546 546 r = util.checkosfilename(path)
547 547 if r:
548 548 raise error.Abort("%s: %r" % (r, path))
549 549 self.audit(path)
550 550 f = self.join(path)
551 551
552 552 if not text and "b" not in mode:
553 553 mode += "b" # for that other OS
554 554
555 555 nlink = -1
556 556 if mode not in ('r', 'rb'):
557 557 dirname, basename = util.split(f)
558 558 # If basename is empty, then the path is malformed because it points
559 559 # to a directory. Let the posixfile() call below raise IOError.
560 560 if basename:
561 561 if atomictemp:
562 562 util.makedirs(dirname, self.createmode, notindexed)
563 563 return util.atomictempfile(f, mode, self.createmode,
564 564 checkambig=checkambig)
565 565 try:
566 566 if 'w' in mode:
567 567 util.unlink(f)
568 568 nlink = 0
569 569 else:
570 570 # nlinks() may behave differently for files on Windows
571 571 # shares if the file is open.
572 572 with util.posixfile(f):
573 573 nlink = util.nlinks(f)
574 574 if nlink < 1:
575 575 nlink = 2 # force mktempcopy (issue1922)
576 576 except (OSError, IOError) as e:
577 577 if e.errno != errno.ENOENT:
578 578 raise
579 579 nlink = 0
580 580 util.makedirs(dirname, self.createmode, notindexed)
581 581 if nlink > 0:
582 582 if self._trustnlink is None:
583 583 self._trustnlink = nlink > 1 or util.checknlink(f)
584 584 if nlink > 1 or not self._trustnlink:
585 585 util.rename(util.mktempcopy(f), f)
586 586 fp = util.posixfile(f, mode)
587 587 if nlink == 0:
588 588 self._fixfilemode(f)
589 589
590 590 if backgroundclose:
591 591 if not self._backgroundfilecloser:
592 592 raise error.Abort(_('backgroundclose can only be used when a '
593 593 'backgroundclosing context manager is active')
594 594 )
595 595
596 596 fp = delayclosedfile(fp, self._backgroundfilecloser)
597 597
598 598 return fp
599 599
600 600 def symlink(self, src, dst):
601 601 self.audit(dst)
602 602 linkname = self.join(dst)
603 603 try:
604 604 os.unlink(linkname)
605 605 except OSError:
606 606 pass
607 607
608 608 util.makedirs(os.path.dirname(linkname), self.createmode)
609 609
610 610 if self._cansymlink:
611 611 try:
612 612 os.symlink(src, linkname)
613 613 except OSError as err:
614 614 raise OSError(err.errno, _('could not symlink to %r: %s') %
615 615 (src, err.strerror), linkname)
616 616 else:
617 617 self.write(dst, src)
618 618
619 619 def join(self, path, *insidef):
620 620 if path:
621 621 return os.path.join(self.base, path, *insidef)
622 622 else:
623 623 return self.base
624 624
625 625 opener = vfs
626 626
627 627 class auditvfs(object):
628 628 def __init__(self, vfs):
629 629 self.vfs = vfs
630 630
631 631 @property
632 632 def mustaudit(self):
633 633 return self.vfs.mustaudit
634 634
635 635 @mustaudit.setter
636 636 def mustaudit(self, onoff):
637 637 self.vfs.mustaudit = onoff
638 638
639 639 @property
640 640 def options(self):
641 641 return self.vfs.options
642 642
643 643 @options.setter
644 644 def options(self, value):
645 645 self.vfs.options = value
646 646
647 647 class filtervfs(abstractvfs, auditvfs):
648 648 '''Wrapper vfs for filtering filenames with a function.'''
649 649
650 650 def __init__(self, vfs, filter):
651 651 auditvfs.__init__(self, vfs)
652 652 self._filter = filter
653 653
654 654 def __call__(self, path, *args, **kwargs):
655 655 return self.vfs(self._filter(path), *args, **kwargs)
656 656
657 657 def join(self, path, *insidef):
658 658 if path:
659 659 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
660 660 else:
661 661 return self.vfs.join(path)
662 662
663 663 filteropener = filtervfs
664 664
665 665 class readonlyvfs(abstractvfs, auditvfs):
666 666 '''Wrapper vfs preventing any writing.'''
667 667
668 668 def __init__(self, vfs):
669 669 auditvfs.__init__(self, vfs)
670 670
671 671 def __call__(self, path, mode='r', *args, **kw):
672 672 if mode not in ('r', 'rb'):
673 673 raise error.Abort(_('this vfs is read only'))
674 674 return self.vfs(path, mode, *args, **kw)
675 675
676 676 def join(self, path, *insidef):
677 677 return self.vfs.join(path, *insidef)
678 678
679 679 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
680 680 '''yield every hg repository under path, always recursively.
681 681 The recurse flag will only control recursion into repo working dirs'''
682 682 def errhandler(err):
683 683 if err.filename == path:
684 684 raise err
685 685 samestat = getattr(os.path, 'samestat', None)
686 686 if followsym and samestat is not None:
687 687 def adddir(dirlst, dirname):
688 688 match = False
689 689 dirstat = os.stat(dirname)
690 690 for lstdirstat in dirlst:
691 691 if samestat(dirstat, lstdirstat):
692 692 match = True
693 693 break
694 694 if not match:
695 695 dirlst.append(dirstat)
696 696 return not match
697 697 else:
698 698 followsym = False
699 699
700 700 if (seen_dirs is None) and followsym:
701 701 seen_dirs = []
702 702 adddir(seen_dirs, path)
703 703 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
704 704 dirs.sort()
705 705 if '.hg' in dirs:
706 706 yield root # found a repository
707 707 qroot = os.path.join(root, '.hg', 'patches')
708 708 if os.path.isdir(os.path.join(qroot, '.hg')):
709 709 yield qroot # we have a patch queue repo here
710 710 if recurse:
711 711 # avoid recursing inside the .hg directory
712 712 dirs.remove('.hg')
713 713 else:
714 714 dirs[:] = [] # don't descend further
715 715 elif followsym:
716 716 newdirs = []
717 717 for d in dirs:
718 718 fname = os.path.join(root, d)
719 719 if adddir(seen_dirs, fname):
720 720 if os.path.islink(fname):
721 721 for hgname in walkrepos(fname, True, seen_dirs):
722 722 yield hgname
723 723 else:
724 724 newdirs.append(d)
725 725 dirs[:] = newdirs
726 726
727 727 def osrcpath():
728 728 '''return default os-specific hgrc search path'''
729 729 path = []
730 730 defaultpath = os.path.join(util.datapath, 'default.d')
731 731 if os.path.isdir(defaultpath):
732 732 for f, kind in osutil.listdir(defaultpath):
733 733 if f.endswith('.rc'):
734 734 path.append(os.path.join(defaultpath, f))
735 735 path.extend(systemrcpath())
736 736 path.extend(userrcpath())
737 737 path = [os.path.normpath(f) for f in path]
738 738 return path
739 739
740 740 _rcpath = None
741 741
742 742 def rcpath():
743 743 '''return hgrc search path. if env var HGRCPATH is set, use it.
744 744 for each item in path, if directory, use files ending in .rc,
745 745 else use item.
746 746 make HGRCPATH empty to only look in .hg/hgrc of current repo.
747 747 if no HGRCPATH, use default os-specific path.'''
748 748 global _rcpath
749 749 if _rcpath is None:
750 750 if 'HGRCPATH' in os.environ:
751 751 _rcpath = []
752 752 for p in os.environ['HGRCPATH'].split(os.pathsep):
753 753 if not p:
754 754 continue
755 755 p = util.expandpath(p)
756 756 if os.path.isdir(p):
757 757 for f, kind in osutil.listdir(p):
758 758 if f.endswith('.rc'):
759 759 _rcpath.append(os.path.join(p, f))
760 760 else:
761 761 _rcpath.append(p)
762 762 else:
763 763 _rcpath = osrcpath()
764 764 return _rcpath
765 765
766 766 def intrev(rev):
767 767 """Return integer for a given revision that can be used in comparison or
768 768 arithmetic operation"""
769 769 if rev is None:
770 770 return wdirrev
771 771 return rev
772 772
773 773 def revsingle(repo, revspec, default='.'):
774 774 if not revspec and revspec != 0:
775 775 return repo[default]
776 776
777 777 l = revrange(repo, [revspec])
778 778 if not l:
779 779 raise error.Abort(_('empty revision set'))
780 780 return repo[l.last()]
781 781
782 782 def _pairspec(revspec):
783 783 tree = revset.parse(revspec)
784 784 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
785 785 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
786 786
787 787 def revpair(repo, revs):
788 788 if not revs:
789 789 return repo.dirstate.p1(), None
790 790
791 791 l = revrange(repo, revs)
792 792
793 793 if not l:
794 794 first = second = None
795 795 elif l.isascending():
796 796 first = l.min()
797 797 second = l.max()
798 798 elif l.isdescending():
799 799 first = l.max()
800 800 second = l.min()
801 801 else:
802 802 first = l.first()
803 803 second = l.last()
804 804
805 805 if first is None:
806 806 raise error.Abort(_('empty revision range'))
807 807 if (first == second and len(revs) >= 2
808 808 and not all(revrange(repo, [r]) for r in revs)):
809 809 raise error.Abort(_('empty revision on one side of range'))
810 810
811 811 # if top-level is range expression, the result must always be a pair
812 812 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
813 813 return repo.lookup(first), None
814 814
815 815 return repo.lookup(first), repo.lookup(second)
816 816
817 817 def revrange(repo, specs):
818 818 """Execute 1 to many revsets and return the union.
819 819
820 820 This is the preferred mechanism for executing revsets using user-specified
821 821 config options, such as revset aliases.
822 822
823 823 The revsets specified by ``specs`` will be executed via a chained ``OR``
824 824 expression. If ``specs`` is empty, an empty result is returned.
825 825
826 826 ``specs`` can contain integers, in which case they are assumed to be
827 827 revision numbers.
828 828
829 829 It is assumed the revsets are already formatted. If you have arguments
830 830 that need to be expanded in the revset, call ``revset.formatspec()``
831 831 and pass the result as an element of ``specs``.
832 832
833 833 Specifying a single revset is allowed.
834 834
835 835 Returns a ``revset.abstractsmartset`` which is a list-like interface over
836 836 integer revisions.
837 837 """
838 838 allspecs = []
839 839 for spec in specs:
840 840 if isinstance(spec, int):
841 841 spec = revset.formatspec('rev(%d)', spec)
842 842 allspecs.append(spec)
843 843 m = revset.matchany(repo.ui, allspecs, repo)
844 844 return m(repo)
845 845
846 846 def meaningfulparents(repo, ctx):
847 847 """Return list of meaningful (or all if debug) parentrevs for rev.
848 848
849 849 For merges (two non-nullrev revisions) both parents are meaningful.
850 850 Otherwise the first parent revision is considered meaningful if it
851 851 is not the preceding revision.
852 852 """
853 853 parents = ctx.parents()
854 854 if len(parents) > 1:
855 855 return parents
856 856 if repo.ui.debugflag:
857 857 return [parents[0], repo['null']]
858 858 if parents[0].rev() >= intrev(ctx.rev()) - 1:
859 859 return []
860 860 return parents
861 861
862 862 def expandpats(pats):
863 863 '''Expand bare globs when running on windows.
864 864 On posix we assume it already has already been done by sh.'''
865 865 if not util.expandglobs:
866 866 return list(pats)
867 867 ret = []
868 868 for kindpat in pats:
869 869 kind, pat = matchmod._patsplit(kindpat, None)
870 870 if kind is None:
871 871 try:
872 872 globbed = glob.glob(pat)
873 873 except re.error:
874 874 globbed = [pat]
875 875 if globbed:
876 876 ret.extend(globbed)
877 877 continue
878 878 ret.append(kindpat)
879 879 return ret
880 880
881 881 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
882 882 badfn=None):
883 883 '''Return a matcher and the patterns that were used.
884 884 The matcher will warn about bad matches, unless an alternate badfn callback
885 885 is provided.'''
886 886 if pats == ("",):
887 887 pats = []
888 888 if opts is None:
889 889 opts = {}
890 890 if not globbed and default == 'relpath':
891 891 pats = expandpats(pats or [])
892 892
893 893 def bad(f, msg):
894 894 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
895 895
896 896 if badfn is None:
897 897 badfn = bad
898 898
899 899 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
900 900 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
901 901
902 902 if m.always():
903 903 pats = []
904 904 return m, pats
905 905
906 906 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
907 907 badfn=None):
908 908 '''Return a matcher that will warn about bad matches.'''
909 909 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
910 910
911 911 def matchall(repo):
912 912 '''Return a matcher that will efficiently match everything.'''
913 913 return matchmod.always(repo.root, repo.getcwd())
914 914
915 915 def matchfiles(repo, files, badfn=None):
916 916 '''Return a matcher that will efficiently match exactly these files.'''
917 917 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
918 918
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        # default: create the backup next to the original file
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured backup dir
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
938 938
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing files for removal,
    detecting renames between the two sets.

    Recurses into subrepositories the matcher covers (or all of them with
    the 'subrepos' option).  Returns 1 if any file was rejected or a
    subrepo reported failure, otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0

    # handle matching subrepositories first
    wctx = repo[None]
    allsubs = opts.get('subrepos')
    for subpath in sorted(wctx.substate):
        if not (allsubs or m.matchessubrepo(subpath)):
            continue
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, m)
            if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                ret = 1
        except error.LookupError:
            repo.ui.status(_("skipping missing subrepository: %s\n")
                           % os.path.join(prefix, subpath))

    # collect files the matcher complained about; they force a failure
    # return at the end, but only if they were named explicitly
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    for path in sorted(unknownset | set(deleted)):
        # exact matches are only reported in verbose mode
        if repo.ui.verbose or not m.exact(path):
            if path in unknownset:
                msg = _('adding %s\n') % m.uipath(path)
            else:
                msg = _('removing %s\n') % m.uipath(path)
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return ret
1002 994
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # only narrate what we are doing in verbose mode; all files here
        # were named explicitly
        unknownset = set(unknown + forgotten)
        for path in sorted(unknownset | set(deleted)):
            if path in unknownset:
                msg = _('adding %s\n') % path
            else:
                msg = _('removing %s\n') % path
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
1031 1023
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.  Returns the tuple
    (added, unknown, deleted, removed, forgotten) of file lists.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for fname, st in walkresults.iteritems():
        state = dirstate[fname]
        if state == '?' and audit_path.check(fname):
            unknown.append(fname)
        elif state != 'r' and not st:
            # tracked but gone from disk
            deleted.append(fname)
        elif state == 'r' and st:
            # marked removed yet present on disk
            forgotten.append(fname)
        elif state == 'r' and not st:
            # removed and absent: candidate rename source
            removed.append(fname)
        elif state == 'a':
            added.append(fname)

    return added, unknown, deleted, removed, forgotten
1060 1052
1061 1053 def _findrenames(repo, matcher, added, removed, similarity):
1062 1054 '''Find renames from removed files to added ones.'''
1063 1055 renames = {}
1064 1056 if similarity > 0:
1065 1057 for old, new, score in similar.findrenames(repo, added, removed,
1066 1058 similarity):
1067 1059 if (repo.ui.verbose or not matcher.exact(old)
1068 1060 or not matcher.exact(new)):
1069 1061 repo.ui.status(_('recording removal of %s as rename to %s '
1070 1062 '(%d%% similar)\n') %
1071 1063 (matcher.rel(old), matcher.rel(new),
1072 1064 score * 100))
1073 1065 renames[new] = old
1074 1066 return renames
1075 1067
1076 1068 def _markchanges(repo, unknown, deleted, renames):
1077 1069 '''Marks the files in unknown as added, the files in deleted as removed,
1078 1070 and the files in renames as copied.'''
1079 1071 wctx = repo[None]
1080 1072 with repo.wlock():
1081 1073 wctx.forget(deleted)
1082 1074 wctx.add(unknown)
1083 1075 for new, old in renames.iteritems():
1084 1076 wctx.copy(old, new)
1085 1077
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:
        # copying back a copy? just make sure dst is tracked normally
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return
    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # the source was only just added: no copy data can be recorded
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1104 1096
def readrequires(opener, supported):
    '''Read and parse .hg/requires via *opener* and check that every entry
    found is in the *supported* feature set.

    Returns the set of requirements; raises RequirementError for a corrupt
    file or for features unknown to this Mercurial.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # a blank or non-alphanumeric-leading entry means corruption
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1123 1115
def writerequires(opener, requirements):
    """Write *requirements*, sorted one per line, to the 'requires' file."""
    lines = ["%s\n" % r for r in sorted(requirements)]
    with opener('requires', 'w') as fp:
        fp.write("".join(lines))
1128 1120
class filecachesubentry(object):
    """Tracks stat information for one file backing a filecache entry."""

    def __init__(self, path, stat):
        # path: file to watch; stat: whether to record stat info right away
        self.path = path
        self.cachestat = None
        # tri-state: True / False / None (not yet known)
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # only re-stat files whose stat info is actually usable as a cache key
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # unknown cacheability is treated optimistically as cacheable
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        # uncacheable files must always be treated as changed
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not have known cacheability before; learn it now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) when the file does not exist
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1183 1175
class filecacheentry(object):
    """Aggregates filecachesubentry trackers for a group of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1200 1192
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative filenames whose changes invalidate the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # backing file changed: recompute the cached value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            # report missing cached value with the conventional error type
            raise AttributeError(self.name)
1279 1271
1280 1272 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1281 1273 if lock is None:
1282 1274 raise error.LockInheritanceContractViolation(
1283 1275 'lock can only be inherited while held')
1284 1276 if environ is None:
1285 1277 environ = {}
1286 1278 with lock.inherit() as locker:
1287 1279 environ[envvar] = locker
1288 1280 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1289 1281
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1298 1290
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1305 1297
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1311 1303
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        # use object.__setattr__ so these attributes land on the proxy
        # itself instead of being forwarded to the wrapped file by our
        # own __setattr__ below
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        # delegate all other attribute reads to the wrapped file
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        # delegate attribute writes to the wrapped file
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # hand the file to the closer instead of closing inline
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1339 1331
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads.

    Must be used as a context manager; close() may only be called while
    the context is active.  When disabled by config or small file counts,
    close() falls back to closing synchronously.
    """
    def __init__(self, ui, expectedcount=-1):
        # ui: config/diagnostics source; expectedcount: number of files the
        # caller expects to close, or -1 when unknown
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        # NOTE(review): 'threading' is not among the imports visible in this
        # file's header -- presumably imported elsewhere; verify.
        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                # queue drained; exit only once the manager has shut down
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
General Comments 0
You need to be logged in to leave comments. Login now