clfilter: remove usage of `range` in favor of iteration over changelog...
Pierre-Yves David
r17675:8575f4a2 default
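
The single functional change in this diff (old line 1397, new lines 1397-1398) swaps `range(len(repo) - 1, -1, -1)` for iteration over the changelog followed by a reverse. Per the commit summary, the point is changelog filtering: `range()` cannot know about filtered revisions, while iterating the changelog can skip them. A minimal standalone sketch of the distinction, using a toy class rather than the real Mercurial API:

    class FilteredChangelog(object):
        """Toy stand-in: len() still reports the full revision count,
        but iteration skips filtered revisions."""
        def __init__(self, count, filteredrevs):
            self.count = count
            self.filteredrevs = set(filteredrevs)
        def __len__(self):
            return self.count
        def __iter__(self):
            for rev in range(self.count):
                if rev not in self.filteredrevs:
                    yield rev

    cl = FilteredChangelog(5, filteredrevs=[2, 3])
    print(list(range(len(cl) - 1, -1, -1)))  # [4, 3, 2, 1, 0] - filtered revs leak in
    revs = list(cl)
    revs.reverse()
    print(revs)                              # [4, 1, 0] - filtered revs skipped
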
@@ -1,1956 +1,1957
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import subrepo, context, repair, bookmarks, graphmod, revset, phases, obsolete
14 14 import lock as lockmod
15 15
16 16 def parsealiases(cmd):
17 17 return cmd.lstrip("^").split("|")
18 18
19 19 def findpossible(cmd, table, strict=False):
20 20 """
21 21 Return cmd -> (aliases, command table entry)
22 22 for each matching command.
23 23 Return debug commands (or their aliases) only if no normal command matches.
24 24 """
25 25 choice = {}
26 26 debugchoice = {}
27 27
28 28 if cmd in table:
29 29 # short-circuit exact matches, "log" alias beats "^log|history"
30 30 keys = [cmd]
31 31 else:
32 32 keys = table.keys()
33 33
34 34 for e in keys:
35 35 aliases = parsealiases(e)
36 36 found = None
37 37 if cmd in aliases:
38 38 found = cmd
39 39 elif not strict:
40 40 for a in aliases:
41 41 if a.startswith(cmd):
42 42 found = a
43 43 break
44 44 if found is not None:
45 45 if aliases[0].startswith("debug") or found.startswith("debug"):
46 46 debugchoice[found] = (aliases, table[e])
47 47 else:
48 48 choice[found] = (aliases, table[e])
49 49
50 50 if not choice and debugchoice:
51 51 choice = debugchoice
52 52
53 53 return choice
54 54
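
As a worked illustration of the matching rules above (an exact alias wins, otherwise any alias prefix matches, and debug commands are offered only when nothing else matched), here is a hypothetical table run through findpossible(); real table values are option tuples rather than strings, but the function only passes them through:

    # hypothetical table; real values are (function, options, synopsis) tuples
    table = {'^log|history': 'log-entry', 'debugstate': 'debug-entry'}

    print(findpossible('history', table))  # exact alias: {'history': ([...], 'log-entry')}
    print(findpossible('hist', table))     # prefix of the "history" alias
    print(findpossible('debugst', table))  # debug command matches only as a fallback
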
55 55 def findcmd(cmd, table, strict=True):
56 56 """Return (aliases, command table entry) for command string."""
57 57 choice = findpossible(cmd, table, strict)
58 58
59 59 if cmd in choice:
60 60 return choice[cmd]
61 61
62 62 if len(choice) > 1:
63 63 clist = choice.keys()
64 64 clist.sort()
65 65 raise error.AmbiguousCommand(cmd, clist)
66 66
67 67 if choice:
68 68 return choice.values()[0]
69 69
70 70 raise error.UnknownCommand(cmd)
71 71
72 72 def findrepo(p):
73 73 while not os.path.isdir(os.path.join(p, ".hg")):
74 74 oldp, p = p, os.path.dirname(p)
75 75 if p == oldp:
76 76 return None
77 77
78 78 return p
79 79
80 80 def bailifchanged(repo):
81 81 if repo.dirstate.p2() != nullid:
82 82 raise util.Abort(_('outstanding uncommitted merge'))
83 83 modified, added, removed, deleted = repo.status()[:4]
84 84 if modified or added or removed or deleted:
85 85 raise util.Abort(_("outstanding uncommitted changes"))
86 86 ctx = repo[None]
87 87 for s in ctx.substate:
88 88 if ctx.sub(s).dirty():
89 89 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
90 90
91 91 def logmessage(ui, opts):
92 92 """get the log message according to the -m and -l options"""
93 93 message = opts.get('message')
94 94 logfile = opts.get('logfile')
95 95
96 96 if message and logfile:
97 97 raise util.Abort(_('options --message and --logfile are mutually '
98 98 'exclusive'))
99 99 if not message and logfile:
100 100 try:
101 101 if logfile == '-':
102 102 message = ui.fin.read()
103 103 else:
104 104 message = '\n'.join(util.readfile(logfile).splitlines())
105 105 except IOError, inst:
106 106 raise util.Abort(_("can't read commit message '%s': %s") %
107 107 (logfile, inst.strerror))
108 108 return message
109 109
110 110 def loglimit(opts):
111 111 """get the log limit according to option -l/--limit"""
112 112 limit = opts.get('limit')
113 113 if limit:
114 114 try:
115 115 limit = int(limit)
116 116 except ValueError:
117 117 raise util.Abort(_('limit must be a positive integer'))
118 118 if limit <= 0:
119 119 raise util.Abort(_('limit must be positive'))
120 120 else:
121 121 limit = None
122 122 return limit
123 123
124 124 def makefilename(repo, pat, node, desc=None,
125 125 total=None, seqno=None, revwidth=None, pathname=None):
126 126 node_expander = {
127 127 'H': lambda: hex(node),
128 128 'R': lambda: str(repo.changelog.rev(node)),
129 129 'h': lambda: short(node),
130 130 'm': lambda: re.sub('[^\w]', '_', str(desc))
131 131 }
132 132 expander = {
133 133 '%': lambda: '%',
134 134 'b': lambda: os.path.basename(repo.root),
135 135 }
136 136
137 137 try:
138 138 if node:
139 139 expander.update(node_expander)
140 140 if node:
141 141 expander['r'] = (lambda:
142 142 str(repo.changelog.rev(node)).zfill(revwidth or 0))
143 143 if total is not None:
144 144 expander['N'] = lambda: str(total)
145 145 if seqno is not None:
146 146 expander['n'] = lambda: str(seqno)
147 147 if total is not None and seqno is not None:
148 148 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
149 149 if pathname is not None:
150 150 expander['s'] = lambda: os.path.basename(pathname)
151 151 expander['d'] = lambda: os.path.dirname(pathname) or '.'
152 152 expander['p'] = lambda: pathname
153 153
154 154 newname = []
155 155 patlen = len(pat)
156 156 i = 0
157 157 while i < patlen:
158 158 c = pat[i]
159 159 if c == '%':
160 160 i += 1
161 161 c = pat[i]
162 162 c = expander[c]()
163 163 newname.append(c)
164 164 i += 1
165 165 return ''.join(newname)
166 166 except KeyError, inst:
167 167 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
168 168 inst.args[0])
169 169
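
The expander tables above implement printf-style specs: %H/%h/%R/%m come from the node, %b from the repository root, %N/%n from total/seqno, %s/%d/%p from the pathname, and %% escapes a literal percent. A standalone re-creation of the expansion loop with a canned expander, showing how a pattern in the style of export's default 'hg-%h.patch' comes out:

    expander = {'%': lambda: '%', 'h': lambda: 'a1b2c3d4e5f6',
                'n': lambda: '02', 'N': lambda: '10'}

    def expand(pat):
        out, i = [], 0
        while i < len(pat):
            c = pat[i]
            if c == '%':
                i += 1
                c = expander[pat[i]]()
            out.append(c)
            i += 1
        return ''.join(out)

    print(expand('hg-%h.%n-of-%N.patch'))  # hg-a1b2c3d4e5f6.02-of-10.patch
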
170 170 def makefileobj(repo, pat, node=None, desc=None, total=None,
171 171 seqno=None, revwidth=None, mode='wb', pathname=None):
172 172
173 173 writable = mode not in ('r', 'rb')
174 174
175 175 if not pat or pat == '-':
176 176 fp = writable and repo.ui.fout or repo.ui.fin
177 177 if util.safehasattr(fp, 'fileno'):
178 178 return os.fdopen(os.dup(fp.fileno()), mode)
179 179 else:
180 180 # if this fp can't be duped properly, return
181 181 # a dummy object that can be closed
182 182 class wrappedfileobj(object):
183 183 noop = lambda x: None
184 184 def __init__(self, f):
185 185 self.f = f
186 186 def __getattr__(self, attr):
187 187 if attr == 'close':
188 188 return self.noop
189 189 else:
190 190 return getattr(self.f, attr)
191 191
192 192 return wrappedfileobj(fp)
193 193 if util.safehasattr(pat, 'write') and writable:
194 194 return pat
195 195 if util.safehasattr(pat, 'read') and 'r' in mode:
196 196 return pat
197 197 return open(makefilename(repo, pat, node, desc, total, seqno, revwidth,
198 198 pathname),
199 199 mode)
200 200
201 201 def openrevlog(repo, cmd, file_, opts):
202 202 """opens the changelog, manifest, a filelog or a given revlog"""
203 203 cl = opts['changelog']
204 204 mf = opts['manifest']
205 205 msg = None
206 206 if cl and mf:
207 207 msg = _('cannot specify --changelog and --manifest at the same time')
208 208 elif cl or mf:
209 209 if file_:
210 210 msg = _('cannot specify filename with --changelog or --manifest')
211 211 elif not repo:
212 212 msg = _('cannot specify --changelog or --manifest '
213 213 'without a repository')
214 214 if msg:
215 215 raise util.Abort(msg)
216 216
217 217 r = None
218 218 if repo:
219 219 if cl:
220 220 r = repo.changelog
221 221 elif mf:
222 222 r = repo.manifest
223 223 elif file_:
224 224 filelog = repo.file(file_)
225 225 if len(filelog):
226 226 r = filelog
227 227 if not r:
228 228 if not file_:
229 229 raise error.CommandError(cmd, _('invalid arguments'))
230 230 if not os.path.isfile(file_):
231 231 raise util.Abort(_("revlog '%s' not found") % file_)
232 232 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
233 233 file_[:-2] + ".i")
234 234 return r
235 235
236 236 def copy(ui, repo, pats, opts, rename=False):
237 237 # called with the repo lock held
238 238 #
239 239 # hgsep => pathname that uses "/" to separate directories
240 240 # ossep => pathname that uses os.sep to separate directories
241 241 cwd = repo.getcwd()
242 242 targets = {}
243 243 after = opts.get("after")
244 244 dryrun = opts.get("dry_run")
245 245 wctx = repo[None]
246 246
247 247 def walkpat(pat):
248 248 srcs = []
249 249 badstates = after and '?' or '?r'
250 250 m = scmutil.match(repo[None], [pat], opts, globbed=True)
251 251 for abs in repo.walk(m):
252 252 state = repo.dirstate[abs]
253 253 rel = m.rel(abs)
254 254 exact = m.exact(abs)
255 255 if state in badstates:
256 256 if exact and state == '?':
257 257 ui.warn(_('%s: not copying - file is not managed\n') % rel)
258 258 if exact and state == 'r':
259 259 ui.warn(_('%s: not copying - file has been marked for'
260 260 ' remove\n') % rel)
261 261 continue
262 262 # abs: hgsep
263 263 # rel: ossep
264 264 srcs.append((abs, rel, exact))
265 265 return srcs
266 266
267 267 # abssrc: hgsep
268 268 # relsrc: ossep
269 269 # otarget: ossep
270 270 def copyfile(abssrc, relsrc, otarget, exact):
271 271 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
272 272 if '/' in abstarget:
273 273 # We cannot normalize abstarget itself, this would prevent
274 274 # case only renames, like a => A.
275 275 abspath, absname = abstarget.rsplit('/', 1)
276 276 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
277 277 reltarget = repo.pathto(abstarget, cwd)
278 278 target = repo.wjoin(abstarget)
279 279 src = repo.wjoin(abssrc)
280 280 state = repo.dirstate[abstarget]
281 281
282 282 scmutil.checkportable(ui, abstarget)
283 283
284 284 # check for collisions
285 285 prevsrc = targets.get(abstarget)
286 286 if prevsrc is not None:
287 287 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
288 288 (reltarget, repo.pathto(abssrc, cwd),
289 289 repo.pathto(prevsrc, cwd)))
290 290 return
291 291
292 292 # check for overwrites
293 293 exists = os.path.lexists(target)
294 294 samefile = False
295 295 if exists and abssrc != abstarget:
296 296 if (repo.dirstate.normalize(abssrc) ==
297 297 repo.dirstate.normalize(abstarget)):
298 298 if not rename:
299 299 ui.warn(_("%s: can't copy - same file\n") % reltarget)
300 300 return
301 301 exists = False
302 302 samefile = True
303 303
304 304 if not after and exists or after and state in 'mn':
305 305 if not opts['force']:
306 306 ui.warn(_('%s: not overwriting - file exists\n') %
307 307 reltarget)
308 308 return
309 309
310 310 if after:
311 311 if not exists:
312 312 if rename:
313 313 ui.warn(_('%s: not recording move - %s does not exist\n') %
314 314 (relsrc, reltarget))
315 315 else:
316 316 ui.warn(_('%s: not recording copy - %s does not exist\n') %
317 317 (relsrc, reltarget))
318 318 return
319 319 elif not dryrun:
320 320 try:
321 321 if exists:
322 322 os.unlink(target)
323 323 targetdir = os.path.dirname(target) or '.'
324 324 if not os.path.isdir(targetdir):
325 325 os.makedirs(targetdir)
326 326 if samefile:
327 327 tmp = target + "~hgrename"
328 328 os.rename(src, tmp)
329 329 os.rename(tmp, target)
330 330 else:
331 331 util.copyfile(src, target)
332 332 srcexists = True
333 333 except IOError, inst:
334 334 if inst.errno == errno.ENOENT:
335 335 ui.warn(_('%s: deleted in working copy\n') % relsrc)
336 336 srcexists = False
337 337 else:
338 338 ui.warn(_('%s: cannot copy - %s\n') %
339 339 (relsrc, inst.strerror))
340 340 return True # report a failure
341 341
342 342 if ui.verbose or not exact:
343 343 if rename:
344 344 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
345 345 else:
346 346 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
347 347
348 348 targets[abstarget] = abssrc
349 349
350 350 # fix up dirstate
351 351 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
352 352 dryrun=dryrun, cwd=cwd)
353 353 if rename and not dryrun:
354 354 if not after and srcexists and not samefile:
355 355 util.unlinkpath(repo.wjoin(abssrc))
356 356 wctx.forget([abssrc])
357 357
358 358 # pat: ossep
359 359 # dest: ossep
360 360 # srcs: list of (hgsep, hgsep, ossep, bool)
361 361 # return: function that takes hgsep and returns ossep
362 362 def targetpathfn(pat, dest, srcs):
363 363 if os.path.isdir(pat):
364 364 abspfx = scmutil.canonpath(repo.root, cwd, pat)
365 365 abspfx = util.localpath(abspfx)
366 366 if destdirexists:
367 367 striplen = len(os.path.split(abspfx)[0])
368 368 else:
369 369 striplen = len(abspfx)
370 370 if striplen:
371 371 striplen += len(os.sep)
372 372 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
373 373 elif destdirexists:
374 374 res = lambda p: os.path.join(dest,
375 375 os.path.basename(util.localpath(p)))
376 376 else:
377 377 res = lambda p: dest
378 378 return res
379 379
380 380 # pat: ossep
381 381 # dest: ossep
382 382 # srcs: list of (hgsep, hgsep, ossep, bool)
383 383 # return: function that takes hgsep and returns ossep
384 384 def targetpathafterfn(pat, dest, srcs):
385 385 if matchmod.patkind(pat):
386 386 # a mercurial pattern
387 387 res = lambda p: os.path.join(dest,
388 388 os.path.basename(util.localpath(p)))
389 389 else:
390 390 abspfx = scmutil.canonpath(repo.root, cwd, pat)
391 391 if len(abspfx) < len(srcs[0][0]):
392 392 # A directory. Either the target path contains the last
393 393 # component of the source path or it does not.
394 394 def evalpath(striplen):
395 395 score = 0
396 396 for s in srcs:
397 397 t = os.path.join(dest, util.localpath(s[0])[striplen:])
398 398 if os.path.lexists(t):
399 399 score += 1
400 400 return score
401 401
402 402 abspfx = util.localpath(abspfx)
403 403 striplen = len(abspfx)
404 404 if striplen:
405 405 striplen += len(os.sep)
406 406 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
407 407 score = evalpath(striplen)
408 408 striplen1 = len(os.path.split(abspfx)[0])
409 409 if striplen1:
410 410 striplen1 += len(os.sep)
411 411 if evalpath(striplen1) > score:
412 412 striplen = striplen1
413 413 res = lambda p: os.path.join(dest,
414 414 util.localpath(p)[striplen:])
415 415 else:
416 416 # a file
417 417 if destdirexists:
418 418 res = lambda p: os.path.join(dest,
419 419 os.path.basename(util.localpath(p)))
420 420 else:
421 421 res = lambda p: dest
422 422 return res
423 423
424 424
425 425 pats = scmutil.expandpats(pats)
426 426 if not pats:
427 427 raise util.Abort(_('no source or destination specified'))
428 428 if len(pats) == 1:
429 429 raise util.Abort(_('no destination specified'))
430 430 dest = pats.pop()
431 431 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
432 432 if not destdirexists:
433 433 if len(pats) > 1 or matchmod.patkind(pats[0]):
434 434 raise util.Abort(_('with multiple sources, destination must be an '
435 435 'existing directory'))
436 436 if util.endswithsep(dest):
437 437 raise util.Abort(_('destination %s is not a directory') % dest)
438 438
439 439 tfn = targetpathfn
440 440 if after:
441 441 tfn = targetpathafterfn
442 442 copylist = []
443 443 for pat in pats:
444 444 srcs = walkpat(pat)
445 445 if not srcs:
446 446 continue
447 447 copylist.append((tfn(pat, dest, srcs), srcs))
448 448 if not copylist:
449 449 raise util.Abort(_('no files to copy'))
450 450
451 451 errors = 0
452 452 for targetpath, srcs in copylist:
453 453 for abssrc, relsrc, exact in srcs:
454 454 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
455 455 errors += 1
456 456
457 457 if errors:
458 458 ui.warn(_('(consider using --after)\n'))
459 459
460 460 return errors != 0
461 461
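
The hgsep/ossep convention noted above only differs on Windows, where dirstate paths always use "/" while user-facing paths use os.sep. A rough sketch of the two conversions (util.localpath and util.pconvert do approximately this, with extra platform handling):

    import os

    def localpath(path):   # hgsep -> ossep (a no-op on POSIX)
        return path.replace('/', os.sep)

    def pconvert(path):    # ossep -> hgsep
        return path.replace(os.sep, '/')

    print(localpath('dir/sub/file.txt'))   # dir\sub\file.txt on Windows
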
462 462 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
463 463 runargs=None, appendpid=False):
464 464 '''Run a command as a service.'''
465 465
466 466 if opts['daemon'] and not opts['daemon_pipefds']:
467 467 # Signal child process startup with file removal
468 468 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
469 469 os.close(lockfd)
470 470 try:
471 471 if not runargs:
472 472 runargs = util.hgcmd() + sys.argv[1:]
473 473 runargs.append('--daemon-pipefds=%s' % lockpath)
474 474 # Don't pass --cwd to the child process, because we've already
475 475 # changed directory.
476 476 for i in xrange(1, len(runargs)):
477 477 if runargs[i].startswith('--cwd='):
478 478 del runargs[i]
479 479 break
480 480 elif runargs[i].startswith('--cwd'):
481 481 del runargs[i:i + 2]
482 482 break
483 483 def condfn():
484 484 return not os.path.exists(lockpath)
485 485 pid = util.rundetached(runargs, condfn)
486 486 if pid < 0:
487 487 raise util.Abort(_('child process failed to start'))
488 488 finally:
489 489 try:
490 490 os.unlink(lockpath)
491 491 except OSError, e:
492 492 if e.errno != errno.ENOENT:
493 493 raise
494 494 if parentfn:
495 495 return parentfn(pid)
496 496 else:
497 497 return
498 498
499 499 if initfn:
500 500 initfn()
501 501
502 502 if opts['pid_file']:
503 503 mode = appendpid and 'a' or 'w'
504 504 fp = open(opts['pid_file'], mode)
505 505 fp.write(str(os.getpid()) + '\n')
506 506 fp.close()
507 507
508 508 if opts['daemon_pipefds']:
509 509 lockpath = opts['daemon_pipefds']
510 510 try:
511 511 os.setsid()
512 512 except AttributeError:
513 513 pass
514 514 os.unlink(lockpath)
515 515 util.hidewindow()
516 516 sys.stdout.flush()
517 517 sys.stderr.flush()
518 518
519 519 nullfd = os.open(os.devnull, os.O_RDWR)
520 520 logfilefd = nullfd
521 521 if logfile:
522 522 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
523 523 os.dup2(nullfd, 0)
524 524 os.dup2(logfilefd, 1)
525 525 os.dup2(logfilefd, 2)
526 526 if nullfd not in (0, 1, 2):
527 527 os.close(nullfd)
528 528 if logfile and logfilefd not in (0, 1, 2):
529 529 os.close(logfilefd)
530 530
531 531 if runfn:
532 532 return runfn()
533 533
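
The tail of service() is the standard daemon stdio shuffle: open os.devnull (and optionally a log file), dup2() them over descriptors 0-2, then close the originals unless they already landed on 0-2. The same step as a self-contained function, assuming the process has already detached:

    import os

    def redirectstdio(logfile=None):
        # point stdin at /dev/null and stdout/stderr at the log (or /dev/null)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfd = nullfd
        if logfile:
            logfd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfd, 1)
        os.dup2(logfd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfd not in (0, 1, 2):
            os.close(logfd)
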
534 534 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
535 535 opts=None):
536 536 '''export changesets as hg patches.'''
537 537
538 538 total = len(revs)
539 539 revwidth = max([len(str(rev)) for rev in revs])
540 540
541 541 def single(rev, seqno, fp):
542 542 ctx = repo[rev]
543 543 node = ctx.node()
544 544 parents = [p.node() for p in ctx.parents() if p]
545 545 branch = ctx.branch()
546 546 if switch_parent:
547 547 parents.reverse()
548 548 prev = (parents and parents[0]) or nullid
549 549
550 550 shouldclose = False
551 551 if not fp and len(template) > 0:
552 552 desc_lines = ctx.description().rstrip().split('\n')
553 553 desc = desc_lines[0] # Commit always has a first line.
554 554 fp = makefileobj(repo, template, node, desc=desc, total=total,
555 555 seqno=seqno, revwidth=revwidth, mode='ab')
556 556 if fp != template:
557 557 shouldclose = True
558 558 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
559 559 repo.ui.note("%s\n" % fp.name)
560 560
561 561 if not fp:
562 562 write = repo.ui.write
563 563 else:
564 564 def write(s, **kw):
565 565 fp.write(s)
566 566
567 567
568 568 write("# HG changeset patch\n")
569 569 write("# User %s\n" % ctx.user())
570 570 write("# Date %d %d\n" % ctx.date())
571 571 if branch and branch != 'default':
572 572 write("# Branch %s\n" % branch)
573 573 write("# Node ID %s\n" % hex(node))
574 574 write("# Parent %s\n" % hex(prev))
575 575 if len(parents) > 1:
576 576 write("# Parent %s\n" % hex(parents[1]))
577 577 write(ctx.description().rstrip())
578 578 write("\n\n")
579 579
580 580 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
581 581 write(chunk, label=label)
582 582
583 583 if shouldclose:
584 584 fp.close()
585 585
586 586 for seqno, rev in enumerate(revs):
587 587 single(rev, seqno + 1, fp)
588 588
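
For reference, the write() calls in single() emit a patch header ahead of the diff body. A sketch that assembles the same lines from plain values (all inputs hypothetical):

    def patchheader(user, date, branch, node, parents):
        # date is a (unixtime, tzoffset) pair, as ctx.date() returns
        lines = ["# HG changeset patch",
                 "# User %s" % user,
                 "# Date %d %d" % date]
        if branch and branch != 'default':
            lines.append("# Branch %s" % branch)
        lines.append("# Node ID %s" % node)
        for p in parents:
            lines.append("# Parent %s" % p)
        return "\n".join(lines)

    print(patchheader('Alice <alice@example.com>', (1350000000, 0), 'stable',
                      40 * 'f', [40 * 'e']))
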
589 589 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
590 590 changes=None, stat=False, fp=None, prefix='',
591 591 listsubrepos=False):
592 592 '''show diff or diffstat.'''
593 593 if fp is None:
594 594 write = ui.write
595 595 else:
596 596 def write(s, **kw):
597 597 fp.write(s)
598 598
599 599 if stat:
600 600 diffopts = diffopts.copy(context=0)
601 601 width = 80
602 602 if not ui.plain():
603 603 width = ui.termwidth()
604 604 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
605 605 prefix=prefix)
606 606 for chunk, label in patch.diffstatui(util.iterlines(chunks),
607 607 width=width,
608 608 git=diffopts.git):
609 609 write(chunk, label=label)
610 610 else:
611 611 for chunk, label in patch.diffui(repo, node1, node2, match,
612 612 changes, diffopts, prefix=prefix):
613 613 write(chunk, label=label)
614 614
615 615 if listsubrepos:
616 616 ctx1 = repo[node1]
617 617 ctx2 = repo[node2]
618 618 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
619 619 tempnode2 = node2
620 620 try:
621 621 if node2 is not None:
622 622 tempnode2 = ctx2.substate[subpath][1]
623 623 except KeyError:
624 624 # A subrepo that existed in node1 was deleted between node1 and
625 625 # node2 (inclusive). Thus, ctx2's substate won't contain that
626 626 # subpath. The best we can do is to ignore it.
627 627 tempnode2 = None
628 628 submatch = matchmod.narrowmatcher(subpath, match)
629 629 sub.diff(diffopts, tempnode2, submatch, changes=changes,
630 630 stat=stat, fp=fp, prefix=prefix)
631 631
632 632 class changeset_printer(object):
633 633 '''show changeset information when templating not requested.'''
634 634
635 635 def __init__(self, ui, repo, patch, diffopts, buffered):
636 636 self.ui = ui
637 637 self.repo = repo
638 638 self.buffered = buffered
639 639 self.patch = patch
640 640 self.diffopts = diffopts
641 641 self.header = {}
642 642 self.hunk = {}
643 643 self.lastheader = None
644 644 self.footer = None
645 645
646 646 def flush(self, rev):
647 647 if rev in self.header:
648 648 h = self.header[rev]
649 649 if h != self.lastheader:
650 650 self.lastheader = h
651 651 self.ui.write(h)
652 652 del self.header[rev]
653 653 if rev in self.hunk:
654 654 self.ui.write(self.hunk[rev])
655 655 del self.hunk[rev]
656 656 return 1
657 657 return 0
658 658
659 659 def close(self):
660 660 if self.footer:
661 661 self.ui.write(self.footer)
662 662
663 663 def show(self, ctx, copies=None, matchfn=None, **props):
664 664 if self.buffered:
665 665 self.ui.pushbuffer()
666 666 self._show(ctx, copies, matchfn, props)
667 667 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
668 668 else:
669 669 self._show(ctx, copies, matchfn, props)
670 670
671 671 def _show(self, ctx, copies, matchfn, props):
672 672 '''show a single changeset or file revision'''
673 673 changenode = ctx.node()
674 674 rev = ctx.rev()
675 675
676 676 if self.ui.quiet:
677 677 self.ui.write("%d:%s\n" % (rev, short(changenode)),
678 678 label='log.node')
679 679 return
680 680
681 681 log = self.repo.changelog
682 682 date = util.datestr(ctx.date())
683 683
684 684 hexfunc = self.ui.debugflag and hex or short
685 685
686 686 parents = [(p, hexfunc(log.node(p)))
687 687 for p in self._meaningful_parentrevs(log, rev)]
688 688
689 689 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
690 690 label='log.changeset')
691 691
692 692 branch = ctx.branch()
693 693 # don't show the default branch name
694 694 if branch != 'default':
695 695 self.ui.write(_("branch: %s\n") % branch,
696 696 label='log.branch')
697 697 for bookmark in self.repo.nodebookmarks(changenode):
698 698 self.ui.write(_("bookmark: %s\n") % bookmark,
699 699 label='log.bookmark')
700 700 for tag in self.repo.nodetags(changenode):
701 701 self.ui.write(_("tag: %s\n") % tag,
702 702 label='log.tag')
703 703 if self.ui.debugflag and ctx.phase():
704 704 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
705 705 label='log.phase')
706 706 for parent in parents:
707 707 self.ui.write(_("parent: %d:%s\n") % parent,
708 708 label='log.parent')
709 709
710 710 if self.ui.debugflag:
711 711 mnode = ctx.manifestnode()
712 712 self.ui.write(_("manifest: %d:%s\n") %
713 713 (self.repo.manifest.rev(mnode), hex(mnode)),
714 714 label='ui.debug log.manifest')
715 715 self.ui.write(_("user: %s\n") % ctx.user(),
716 716 label='log.user')
717 717 self.ui.write(_("date: %s\n") % date,
718 718 label='log.date')
719 719
720 720 if self.ui.debugflag:
721 721 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
722 722 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
723 723 files):
724 724 if value:
725 725 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
726 726 label='ui.debug log.files')
727 727 elif ctx.files() and self.ui.verbose:
728 728 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
729 729 label='ui.note log.files')
730 730 if copies and self.ui.verbose:
731 731 copies = ['%s (%s)' % c for c in copies]
732 732 self.ui.write(_("copies: %s\n") % ' '.join(copies),
733 733 label='ui.note log.copies')
734 734
735 735 extra = ctx.extra()
736 736 if extra and self.ui.debugflag:
737 737 for key, value in sorted(extra.items()):
738 738 self.ui.write(_("extra: %s=%s\n")
739 739 % (key, value.encode('string_escape')),
740 740 label='ui.debug log.extra')
741 741
742 742 description = ctx.description().strip()
743 743 if description:
744 744 if self.ui.verbose:
745 745 self.ui.write(_("description:\n"),
746 746 label='ui.note log.description')
747 747 self.ui.write(description,
748 748 label='ui.note log.description')
749 749 self.ui.write("\n\n")
750 750 else:
751 751 self.ui.write(_("summary: %s\n") %
752 752 description.splitlines()[0],
753 753 label='log.summary')
754 754 self.ui.write("\n")
755 755
756 756 self.showpatch(changenode, matchfn)
757 757
758 758 def showpatch(self, node, matchfn):
759 759 if not matchfn:
760 760 matchfn = self.patch
761 761 if matchfn:
762 762 stat = self.diffopts.get('stat')
763 763 diff = self.diffopts.get('patch')
764 764 diffopts = patch.diffopts(self.ui, self.diffopts)
765 765 prev = self.repo.changelog.parents(node)[0]
766 766 if stat:
767 767 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
768 768 match=matchfn, stat=True)
769 769 if diff:
770 770 if stat:
771 771 self.ui.write("\n")
772 772 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
773 773 match=matchfn, stat=False)
774 774 self.ui.write("\n")
775 775
776 776 def _meaningful_parentrevs(self, log, rev):
777 777 """Return list of meaningful (or all if debug) parentrevs for rev.
778 778
779 779 For merges (two non-nullrev revisions) both parents are meaningful.
780 780 Otherwise the first parent revision is considered meaningful if it
781 781 is not the preceding revision.
782 782 """
783 783 parents = log.parentrevs(rev)
784 784 if not self.ui.debugflag and parents[1] == nullrev:
785 785 if parents[0] >= rev - 1:
786 786 parents = []
787 787 else:
788 788 parents = [parents[0]]
789 789 return parents
790 790
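
Concretely, the rule in this docstring means: for a non-merge, show the first parent only when it is not simply rev - 1. A standalone restatement with worked cases:

    nullrev = -1

    def meaningfulparents(rev, parents, debug=False):
        if not debug and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                return []           # linear history: the parent is implied
            return [parents[0]]     # a jump in history: worth showing
        return list(parents)        # merge (or debug mode): show everything

    print(meaningfulparents(5, (4, -1)))  # []
    print(meaningfulparents(5, (2, -1)))  # [2]
    print(meaningfulparents(5, (2, 4)))   # [2, 4]
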
791 791
792 792 class changeset_templater(changeset_printer):
793 793 '''format changeset information.'''
794 794
795 795 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
796 796 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
797 797 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
798 798 defaulttempl = {
799 799 'parent': '{rev}:{node|formatnode} ',
800 800 'manifest': '{rev}:{node|formatnode}',
801 801 'file_copy': '{name} ({source})',
802 802 'extra': '{key}={value|stringescape}'
803 803 }
804 804 # filecopy is preserved for compatibility reasons
805 805 defaulttempl['filecopy'] = defaulttempl['file_copy']
806 806 self.t = templater.templater(mapfile, {'formatnode': formatnode},
807 807 cache=defaulttempl)
808 808 self.cache = {}
809 809
810 810 def use_template(self, t):
811 811 '''set template string to use'''
812 812 self.t.cache['changeset'] = t
813 813
814 814 def _meaningful_parentrevs(self, ctx):
815 815 """Return list of meaningful (or all if debug) parentrevs for rev.
816 816 """
817 817 parents = ctx.parents()
818 818 if len(parents) > 1:
819 819 return parents
820 820 if self.ui.debugflag:
821 821 return [parents[0], self.repo['null']]
822 822 if parents[0].rev() >= ctx.rev() - 1:
823 823 return []
824 824 return parents
825 825
826 826 def _show(self, ctx, copies, matchfn, props):
827 827 '''show a single changeset or file revision'''
828 828
829 829 showlist = templatekw.showlist
830 830
831 831 # showparents() behaviour depends on the ui trace level, which
832 832 # causes unexpected behaviour at the templating level and makes
833 833 # it harder to extract into a standalone function. Its
834 834 # behaviour cannot be changed, so leave it here for now.
835 835 def showparents(**args):
836 836 ctx = args['ctx']
837 837 parents = [[('rev', p.rev()), ('node', p.hex())]
838 838 for p in self._meaningful_parentrevs(ctx)]
839 839 return showlist('parent', parents, **args)
840 840
841 841 props = props.copy()
842 842 props.update(templatekw.keywords)
843 843 props['parents'] = showparents
844 844 props['templ'] = self.t
845 845 props['ctx'] = ctx
846 846 props['repo'] = self.repo
847 847 props['revcache'] = {'copies': copies}
848 848 props['cache'] = self.cache
849 849
850 850 # find correct templates for current mode
851 851
852 852 tmplmodes = [
853 853 (True, None),
854 854 (self.ui.verbose, 'verbose'),
855 855 (self.ui.quiet, 'quiet'),
856 856 (self.ui.debugflag, 'debug'),
857 857 ]
858 858
859 859 types = {'header': '', 'footer': '', 'changeset': 'changeset'}
860 860 for mode, postfix in tmplmodes:
861 861 for type in types:
862 862 cur = postfix and ('%s_%s' % (type, postfix)) or type
863 863 if mode and cur in self.t:
864 864 types[type] = cur
865 865
866 866 try:
867 867
868 868 # write header
869 869 if types['header']:
870 870 h = templater.stringify(self.t(types['header'], **props))
871 871 if self.buffered:
872 872 self.header[ctx.rev()] = h
873 873 else:
874 874 if self.lastheader != h:
875 875 self.lastheader = h
876 876 self.ui.write(h)
877 877
878 878 # write changeset metadata, then patch if requested
879 879 key = types['changeset']
880 880 self.ui.write(templater.stringify(self.t(key, **props)))
881 881 self.showpatch(ctx.node(), matchfn)
882 882
883 883 if types['footer']:
884 884 if not self.footer:
885 885 self.footer = templater.stringify(self.t(types['footer'],
886 886 **props))
887 887
888 888 except KeyError, inst:
889 889 msg = _("%s: no key named '%s'")
890 890 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
891 891 except SyntaxError, inst:
892 892 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
893 893
894 894 def show_changeset(ui, repo, opts, buffered=False):
895 895 """show one changeset using template or regular display.
896 896
897 897 Display format will be the first non-empty hit of:
898 898 1. option 'template'
899 899 2. option 'style'
900 900 3. [ui] setting 'logtemplate'
901 901 4. [ui] setting 'style'
902 902 If all of these values are either unset or the empty string,
903 903 regular display via changeset_printer() is done.
904 904 """
905 905 # options
906 906 patch = False
907 907 if opts.get('patch') or opts.get('stat'):
908 908 patch = scmutil.matchall(repo)
909 909
910 910 tmpl = opts.get('template')
911 911 style = None
912 912 if tmpl:
913 913 tmpl = templater.parsestring(tmpl, quoted=False)
914 914 else:
915 915 style = opts.get('style')
916 916
917 917 # ui settings
918 918 if not (tmpl or style):
919 919 tmpl = ui.config('ui', 'logtemplate')
920 920 if tmpl:
921 921 try:
922 922 tmpl = templater.parsestring(tmpl)
923 923 except SyntaxError:
924 924 tmpl = templater.parsestring(tmpl, quoted=False)
925 925 else:
926 926 style = util.expandpath(ui.config('ui', 'style', ''))
927 927
928 928 if not (tmpl or style):
929 929 return changeset_printer(ui, repo, patch, opts, buffered)
930 930
931 931 mapfile = None
932 932 if style and not tmpl:
933 933 mapfile = style
934 934 if not os.path.split(mapfile)[0]:
935 935 mapname = (templater.templatepath('map-cmdline.' + mapfile)
936 936 or templater.templatepath(mapfile))
937 937 if mapname:
938 938 mapfile = mapname
939 939
940 940 try:
941 941 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
942 942 except SyntaxError, inst:
943 943 raise util.Abort(inst.args[0])
944 944 if tmpl:
945 945 t.use_template(tmpl)
946 946 return t
947 947
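
The four-step precedence in the docstring reduces to a short chain; a sketch with plain dicts standing in for opts and the [ui] config section (all values hypothetical):

    def pickformat(opts, uiconfig):
        tmpl = opts.get('template')
        style = None if tmpl else opts.get('style')
        if not (tmpl or style):
            tmpl = uiconfig.get('logtemplate')
            if not tmpl:
                style = uiconfig.get('style')
        return tmpl, style

    print(pickformat({}, {'style': 'compact'}))  # (None, 'compact')
    print(pickformat({'template': '{rev}\n'}, {'style': 'compact'}))
    # ('{rev}\n', None): a command-line template beats everything
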
948 948 def finddate(ui, repo, date):
949 949 """Find the tipmost changeset that matches the given date spec"""
950 950
951 951 df = util.matchdate(date)
952 952 m = scmutil.matchall(repo)
953 953 results = {}
954 954
955 955 def prep(ctx, fns):
956 956 d = ctx.date()
957 957 if df(d[0]):
958 958 results[ctx.rev()] = d
959 959
960 960 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
961 961 rev = ctx.rev()
962 962 if rev in results:
963 963 ui.status(_("found revision %s from %s\n") %
964 964 (rev, util.datestr(results[rev])))
965 965 return str(rev)
966 966
967 967 raise util.Abort(_("revision matching date not found"))
968 968
969 969 def increasingwindows(start, end, windowsize=8, sizelimit=512):
970 970 if start < end:
971 971 while start < end:
972 972 yield start, min(windowsize, end - start)
973 973 start += windowsize
974 974 if windowsize < sizelimit:
975 975 windowsize *= 2
976 976 else:
977 977 while start > end:
978 978 yield start, min(windowsize, start - end - 1)
979 979 start -= windowsize
980 980 if windowsize < sizelimit:
981 981 windowsize *= 2
982 982
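
Concretely, the forward branch starts with small windows so the first results arrive quickly, then doubles the window size up to sizelimit. Copying that branch verbatim and printing what it yields:

    def windows(start, end, windowsize=8, sizelimit=512):
        while start < end:
            yield start, min(windowsize, end - start)
            start += windowsize
            if windowsize < sizelimit:
                windowsize *= 2

    print(list(windows(0, 100)))  # [(0, 8), (8, 16), (24, 32), (56, 44)]
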
983 983 def walkchangerevs(repo, match, opts, prepare):
984 984 '''Iterate over files and the revs in which they changed.
985 985
986 986 Callers most commonly need to iterate backwards over the history
987 987 in which they are interested. Doing so has awful (quadratic-looking)
988 988 performance, so we use iterators in a "windowed" way.
989 989
990 990 We walk a window of revisions in the desired order. Within the
991 991 window, we first walk forwards to gather data, then in the desired
992 992 order (usually backwards) to display it.
993 993
994 994 This function returns an iterator yielding contexts. Before
995 995 yielding each context, the iterator will first call the prepare
996 996 function on each context in the window in forward order.'''
997 997
998 998 follow = opts.get('follow') or opts.get('follow_first')
999 999
1000 1000 if not len(repo):
1001 1001 return []
1002 1002
1003 1003 if follow:
1004 1004 defrange = '%s:0' % repo['.'].rev()
1005 1005 else:
1006 1006 defrange = '-1:0'
1007 1007 revs = scmutil.revrange(repo, opts.get('rev') or [defrange])
1008 1008 if not revs:
1009 1009 return []
1010 1010 wanted = set()
1011 1011 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1012 1012 fncache = {}
1013 1013 change = repo.changectx
1014 1014
1015 1015 # First step is to fill wanted, the set of revisions that we want to yield.
1016 1016 # When it does not induce extra cost, we also fill fncache for revisions in
1017 1017 # wanted: a cache of filenames that were changed (ctx.files()) and that
1018 1018 # match the file filtering conditions.
1019 1019
1020 1020 if not slowpath and not match.files():
1021 1021 # No files, no patterns. Display all revs.
1022 1022 wanted = set(revs)
1023 1023 copies = []
1024 1024
1025 1025 if not slowpath and match.files():
1026 1026 # We only have to read through the filelog to find wanted revisions
1027 1027
1028 1028 minrev, maxrev = min(revs), max(revs)
1029 1029 def filerevgen(filelog, last):
1030 1030 """
1031 1031 Only files, no patterns. Check the history of each file.
1032 1032
1033 1033 Examines filelog entries within the minrev, maxrev linkrev range.
1034 1034 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1035 1035 tuples in backwards order.
1036 1036 """
1037 1037 cl_count = len(repo)
1038 1038 revs = []
1039 1039 for j in xrange(0, last + 1):
1040 1040 linkrev = filelog.linkrev(j)
1041 1041 if linkrev < minrev:
1042 1042 continue
1043 1043 # only yield rev for which we have the changelog, it can
1044 1044 # happen while doing "hg log" during a pull or commit
1045 1045 if linkrev >= cl_count:
1046 1046 break
1047 1047
1048 1048 parentlinkrevs = []
1049 1049 for p in filelog.parentrevs(j):
1050 1050 if p != nullrev:
1051 1051 parentlinkrevs.append(filelog.linkrev(p))
1052 1052 n = filelog.node(j)
1053 1053 revs.append((linkrev, parentlinkrevs,
1054 1054 follow and filelog.renamed(n)))
1055 1055
1056 1056 return reversed(revs)
1057 1057 def iterfiles():
1058 1058 pctx = repo['.']
1059 1059 for filename in match.files():
1060 1060 if follow:
1061 1061 if filename not in pctx:
1062 1062 raise util.Abort(_('cannot follow file not in parent '
1063 1063 'revision: "%s"') % filename)
1064 1064 yield filename, pctx[filename].filenode()
1065 1065 else:
1066 1066 yield filename, None
1067 1067 for filename_node in copies:
1068 1068 yield filename_node
1069 1069 for file_, node in iterfiles():
1070 1070 filelog = repo.file(file_)
1071 1071 if not len(filelog):
1072 1072 if node is None:
1073 1073 # A zero count may be a directory or deleted file, so
1074 1074 # try to find matching entries on the slow path.
1075 1075 if follow:
1076 1076 raise util.Abort(
1077 1077 _('cannot follow nonexistent file: "%s"') % file_)
1078 1078 slowpath = True
1079 1079 break
1080 1080 else:
1081 1081 continue
1082 1082
1083 1083 if node is None:
1084 1084 last = len(filelog) - 1
1085 1085 else:
1086 1086 last = filelog.rev(node)
1087 1087
1088 1088
1089 1089 # keep track of all ancestors of the file
1090 1090 ancestors = set([filelog.linkrev(last)])
1091 1091
1092 1092 # iterate from latest to oldest revision
1093 1093 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1094 1094 if not follow:
1095 1095 if rev > maxrev:
1096 1096 continue
1097 1097 else:
1098 1098 # Note that last might not be the first interesting
1099 1099 # rev to us:
1100 1100 # if the file has been changed after maxrev, we'll
1101 1101 # have linkrev(last) > maxrev, and we still need
1102 1102 # to explore the file graph
1103 1103 if rev not in ancestors:
1104 1104 continue
1105 1105 # XXX insert 1327 fix here
1106 1106 if flparentlinkrevs:
1107 1107 ancestors.update(flparentlinkrevs)
1108 1108
1109 1109 fncache.setdefault(rev, []).append(file_)
1110 1110 wanted.add(rev)
1111 1111 if copied:
1112 1112 copies.append(copied)
1113 1113 if slowpath:
1114 1114 # We have to read the changelog to match filenames against
1115 1115 # changed files
1116 1116
1117 1117 if follow:
1118 1118 raise util.Abort(_('can only follow copies/renames for explicit '
1119 1119 'filenames'))
1120 1120
1121 1121 # The slow path checks files modified in every changeset.
1122 1122 for i in sorted(revs):
1123 1123 ctx = change(i)
1124 1124 matches = filter(match, ctx.files())
1125 1125 if matches:
1126 1126 fncache[i] = matches
1127 1127 wanted.add(i)
1128 1128
1129 1129 class followfilter(object):
1130 1130 def __init__(self, onlyfirst=False):
1131 1131 self.startrev = nullrev
1132 1132 self.roots = set()
1133 1133 self.onlyfirst = onlyfirst
1134 1134
1135 1135 def match(self, rev):
1136 1136 def realparents(rev):
1137 1137 if self.onlyfirst:
1138 1138 return repo.changelog.parentrevs(rev)[0:1]
1139 1139 else:
1140 1140 return filter(lambda x: x != nullrev,
1141 1141 repo.changelog.parentrevs(rev))
1142 1142
1143 1143 if self.startrev == nullrev:
1144 1144 self.startrev = rev
1145 1145 return True
1146 1146
1147 1147 if rev > self.startrev:
1148 1148 # forward: all descendants
1149 1149 if not self.roots:
1150 1150 self.roots.add(self.startrev)
1151 1151 for parent in realparents(rev):
1152 1152 if parent in self.roots:
1153 1153 self.roots.add(rev)
1154 1154 return True
1155 1155 else:
1156 1156 # backwards: all parents
1157 1157 if not self.roots:
1158 1158 self.roots.update(realparents(self.startrev))
1159 1159 if rev in self.roots:
1160 1160 self.roots.remove(rev)
1161 1161 self.roots.update(realparents(rev))
1162 1162 return True
1163 1163
1164 1164 return False
1165 1165
1166 1166 # it might be worthwhile to do this in the iterator if the rev range
1167 1167 # is descending and the prune args are all within that range
1168 1168 for rev in opts.get('prune', ()):
1169 1169 rev = repo[rev].rev()
1170 1170 ff = followfilter()
1171 1171 stop = min(revs[0], revs[-1])
1172 1172 for x in xrange(rev, stop - 1, -1):
1173 1173 if ff.match(x):
1174 1174 wanted.discard(x)
1175 1175
1176 1176 # Now that wanted is correctly initialized, we can iterate over the
1177 1177 # revision range, yielding only revisions in wanted.
1178 1178 def iterate():
1179 1179 if follow and not match.files():
1180 1180 ff = followfilter(onlyfirst=opts.get('follow_first'))
1181 1181 def want(rev):
1182 1182 return ff.match(rev) and rev in wanted
1183 1183 else:
1184 1184 def want(rev):
1185 1185 return rev in wanted
1186 1186
1187 1187 for i, window in increasingwindows(0, len(revs)):
1188 1188 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1189 1189 for rev in sorted(nrevs):
1190 1190 fns = fncache.get(rev)
1191 1191 ctx = change(rev)
1192 1192 if not fns:
1193 1193 def fns_generator():
1194 1194 for f in ctx.files():
1195 1195 if match(f):
1196 1196 yield f
1197 1197 fns = fns_generator()
1198 1198 prepare(ctx, fns)
1199 1199 for rev in nrevs:
1200 1200 yield change(rev)
1201 1201 return iterate()
1202 1202
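
Callers follow the finddate() pattern above: build a matcher, supply a prep callback, and consume the returned iterator. A minimal usage sketch, assuming a repo object and this module's scmutil import:

    m = scmutil.matchall(repo)

    def prep(ctx, fns):
        # called once per context in forward order before it is yielded;
        # fns iterates the matched files that changed in ctx
        pass

    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        print(ctx.rev())
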
1203 1203 def _makegraphfilematcher(repo, pats, followfirst):
1204 1204 # When displaying a revision with --patch --follow FILE, we have
1205 1205 # to know which file of the revision must be diffed. With
1206 1206 # --follow, we want the names of the ancestors of FILE in the
1207 1207 # revision, stored in "fcache". "fcache" is populated by
1208 1208 # reproducing the graph traversal already done by --follow revset
1209 1209 # and relating linkrevs to file names (which is not "correct" but
1210 1210 # good enough).
1211 1211 fcache = {}
1212 1212 fcacheready = [False]
1213 1213 pctx = repo['.']
1214 1214 wctx = repo[None]
1215 1215
1216 1216 def populate():
1217 1217 for fn in pats:
1218 1218 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1219 1219 for c in i:
1220 1220 fcache.setdefault(c.linkrev(), set()).add(c.path())
1221 1221
1222 1222 def filematcher(rev):
1223 1223 if not fcacheready[0]:
1224 1224 # Lazy initialization
1225 1225 fcacheready[0] = True
1226 1226 populate()
1227 1227 return scmutil.match(wctx, fcache.get(rev, []), default='path')
1228 1228
1229 1229 return filematcher
1230 1230
1231 1231 def _makegraphlogrevset(repo, pats, opts, revs):
1232 1232 """Return (expr, filematcher) where expr is a revset string built
1233 1233 from log options and file patterns or None. If --stat or --patch
1234 1234 are not passed filematcher is None. Otherwise it is a callable
1235 1235 taking a revision number and returning a match object filtering
1236 1236 the files to be detailed when displaying the revision.
1237 1237 """
1238 1238 opt2revset = {
1239 1239 'no_merges': ('not merge()', None),
1240 1240 'only_merges': ('merge()', None),
1241 1241 '_ancestors': ('ancestors(%(val)s)', None),
1242 1242 '_fancestors': ('_firstancestors(%(val)s)', None),
1243 1243 '_descendants': ('descendants(%(val)s)', None),
1244 1244 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1245 1245 '_matchfiles': ('_matchfiles(%(val)s)', None),
1246 1246 'date': ('date(%(val)r)', None),
1247 1247 'branch': ('branch(%(val)r)', ' or '),
1248 1248 '_patslog': ('filelog(%(val)r)', ' or '),
1249 1249 '_patsfollow': ('follow(%(val)r)', ' or '),
1250 1250 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1251 1251 'keyword': ('keyword(%(val)r)', ' or '),
1252 1252 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1253 1253 'user': ('user(%(val)r)', ' or '),
1254 1254 }
1255 1255
1256 1256 opts = dict(opts)
1257 1257 # follow or not follow?
1258 1258 follow = opts.get('follow') or opts.get('follow_first')
1259 1259 followfirst = opts.get('follow_first') and 1 or 0
1260 1260 # --follow with FILE behaviour depends on revs...
1261 1261 startrev = revs[0]
1262 1262 followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
1263 1263
1264 1264 # branch and only_branch are really aliases and must be handled at
1265 1265 # the same time
1266 1266 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1267 1267 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1268 1268 # pats/include/exclude are passed to match.match() directly in
1269 1269 # _matchfiles() revset but walkchangerevs() builds its matcher with
1270 1270 # scmutil.match(). The difference is input pats are globbed on
1271 1271 # platforms without shell expansion (windows).
1272 1272 pctx = repo[None]
1273 1273 match, pats = scmutil.matchandpats(pctx, pats, opts)
1274 1274 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1275 1275 if not slowpath:
1276 1276 for f in match.files():
1277 1277 if follow and f not in pctx:
1278 1278 raise util.Abort(_('cannot follow file not in parent '
1279 1279 'revision: "%s"') % f)
1280 1280 filelog = repo.file(f)
1281 1281 if not len(filelog):
1282 1282 # A zero count may be a directory or deleted file, so
1283 1283 # try to find matching entries on the slow path.
1284 1284 if follow:
1285 1285 raise util.Abort(
1286 1286 _('cannot follow nonexistent file: "%s"') % f)
1287 1287 slowpath = True
1288 1288 if slowpath:
1289 1289 # See walkchangerevs() slow path.
1290 1290 #
1291 1291 if follow:
1292 1292 raise util.Abort(_('can only follow copies/renames for explicit '
1293 1293 'filenames'))
1294 1294 # pats/include/exclude cannot be represented as separate
1295 1295 # revset expressions as their filtering logic applies at file
1296 1296 # level. For instance "-I a -X a" matches a revision touching
1297 1297 # "a" and "b" while "file(a) and not file(b)" does
1298 1298 # not. Besides, filesets are evaluated against the working
1299 1299 # directory.
1300 1300 matchargs = ['r:', 'd:relpath']
1301 1301 for p in pats:
1302 1302 matchargs.append('p:' + p)
1303 1303 for p in opts.get('include', []):
1304 1304 matchargs.append('i:' + p)
1305 1305 for p in opts.get('exclude', []):
1306 1306 matchargs.append('x:' + p)
1307 1307 matchargs = ','.join(('%r' % p) for p in matchargs)
1308 1308 opts['_matchfiles'] = matchargs
1309 1309 else:
1310 1310 if follow:
1311 1311 fpats = ('_patsfollow', '_patsfollowfirst')
1312 1312 fnopats = (('_ancestors', '_fancestors'),
1313 1313 ('_descendants', '_fdescendants'))
1314 1314 if pats:
1315 1315 # follow() revset interprets its file argument as a
1316 1316 # manifest entry, so use match.files(), not pats.
1317 1317 opts[fpats[followfirst]] = list(match.files())
1318 1318 else:
1319 1319 opts[fnopats[followdescendants][followfirst]] = str(startrev)
1320 1320 else:
1321 1321 opts['_patslog'] = list(pats)
1322 1322
1323 1323 filematcher = None
1324 1324 if opts.get('patch') or opts.get('stat'):
1325 1325 if follow:
1326 1326 filematcher = _makegraphfilematcher(repo, pats, followfirst)
1327 1327 else:
1328 1328 filematcher = lambda rev: match
1329 1329
1330 1330 expr = []
1331 1331 for op, val in opts.iteritems():
1332 1332 if not val:
1333 1333 continue
1334 1334 if op not in opt2revset:
1335 1335 continue
1336 1336 revop, andor = opt2revset[op]
1337 1337 if '%(val)' not in revop:
1338 1338 expr.append(revop)
1339 1339 else:
1340 1340 if not isinstance(val, list):
1341 1341 e = revop % {'val': val}
1342 1342 else:
1343 1343 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
1344 1344 expr.append(e)
1345 1345
1346 1346 if expr:
1347 1347 expr = '(' + ' and '.join(expr) + ')'
1348 1348 else:
1349 1349 expr = None
1350 1350 return expr, filematcher
1351 1351
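
The assembly loop at the end of _makegraphlogrevset() emits one revset fragment per populated option: list values are joined with the option's own separator, and the fragments themselves with ' and '. A standalone sketch over a fixed option order (the real code iterates a dict and quotes values via %r):

    opt2revset = {'no_merges': ('not merge()', None),
                  'keyword': ("keyword('%(val)s')", ' or '),
                  'user': ("user('%(val)s')", ' or ')}
    opts = [('keyword', ['bug']), ('user', ['alice', 'bob']), ('no_merges', True)]

    expr = []
    for op, val in opts:
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        elif not isinstance(val, list):
            expr.append(revop % {'val': val})
        else:
            expr.append('(' + andor.join(revop % {'val': v} for v in val) + ')')
    print('(' + ' and '.join(expr) + ')')
    # (keyword('bug') and (user('alice') or user('bob')) and not merge())
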
1352 1352 def getgraphlogrevs(repo, pats, opts):
1353 1353 """Return (revs, expr, filematcher) where revs is an iterable of
1354 1354 revision numbers, expr is a revset string built from log options
1355 1355 and file patterns or None, and used to filter 'revs'. If --stat or
1356 1356 --patch are not passed filematcher is None. Otherwise it is a
1357 1357 callable taking a revision number and returning a match object
1358 1358 filtering the files to be detailed when displaying the revision.
1359 1359 """
1360 1360 def increasingrevs(repo, revs, matcher):
1361 1361 # The sorted input rev sequence is chopped in sub-sequences
1362 1362 # which are sorted in ascending order and passed to the
1363 1363 # matcher. The filtered revs are sorted again as they were in
1364 1364 # the original sub-sequence. This achieves several things:
1365 1365 #
1366 1366 # - getgraphlogrevs() now returns a generator whose behaviour
1367 1367 # is adapted to log's needs. First results come fast, later
1368 1368 # ones are batched for performance.
1369 1369 #
1370 1370 # - revset matchers often operate faster on revisions in
1371 1371 # changelog order, because most filters deal with the
1372 1372 # changelog.
1373 1373 #
1374 1374 # - revset matchers can reorder revisions. "A or B" typically
1375 1375 # returns the revision matching A then the revision
1376 1376 # matching B. We want to hide this internal implementation
1377 1377 # detail from the caller, and sorting the filtered revisions
1378 1378 # again achieves this.
1379 1379 for i, window in increasingwindows(0, len(revs), windowsize=1):
1380 1380 orevs = revs[i:i + window]
1381 1381 nrevs = set(matcher(repo, sorted(orevs)))
1382 1382 for rev in orevs:
1383 1383 if rev in nrevs:
1384 1384 yield rev
1385 1385
1386 1386 if not len(repo):
1387 1387 return iter([]), None, None
1388 1388 # Default --rev value depends on --follow but --follow behaviour
1389 1389 # depends on revisions resolved from --rev...
1390 1390 follow = opts.get('follow') or opts.get('follow_first')
1391 1391 if opts.get('rev'):
1392 1392 revs = scmutil.revrange(repo, opts['rev'])
1393 1393 else:
1394 1394 if follow and len(repo) > 0:
1395 1395 revs = scmutil.revrange(repo, ['.:0'])
1396 1396 else:
1397 revs = range(len(repo) - 1, -1, -1)
1397 revs = list(repo.changelog)
1398 revs.reverse()
1398 1399 if not revs:
1399 1400 return iter([]), None, None
1400 1401 expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
1401 1402 if expr:
1402 1403 matcher = revset.match(repo.ui, expr)
1403 1404 revs = increasingrevs(repo, revs, matcher)
1404 1405 if not opts.get('hidden'):
1405 1406 # --hidden is still experimental and not worth a dedicated revset
1406 1407 # yet. Fortunately, filtering revision number is fast.
1407 1408 revs = (r for r in revs if r not in repo.hiddenrevs)
1408 1409 else:
1409 1410 revs = iter(revs)
1410 1411 return revs, expr, filematcher
1411 1412
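
This function is where the commit's one-line change lands: the default revs now come from iterating repo.changelog (which, with changelog filtering in place, can skip filtered revisions) instead of range(len(repo) - 1, -1, -1), and everything downstream is unchanged. A compact sketch of the windowed filtering done by increasingrevs(), reusing this file's increasingwindows() with a plain predicate standing in for the revset matcher:

    def filteredrevs(revs, predicate):
        # yield revs in their original order, filtering window by window
        for i, window in increasingwindows(0, len(revs), windowsize=1):
            orevs = revs[i:i + window]
            nrevs = set(r for r in sorted(orevs) if predicate(r))
            for rev in orevs:
                if rev in nrevs:
                    yield rev

    revs = list(range(9, -1, -1))  # newest first, as log wants
    print(list(filteredrevs(revs, lambda r: r % 2 == 0)))  # [8, 6, 4, 2, 0]
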
1412 1413 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
1413 1414 filematcher=None):
1414 1415 seen, state = [], graphmod.asciistate()
1415 1416 for rev, type, ctx, parents in dag:
1416 1417 char = 'o'
1417 1418 if ctx.node() in showparents:
1418 1419 char = '@'
1419 1420 elif ctx.obsolete():
1420 1421 char = 'x'
1421 1422 copies = None
1422 1423 if getrenamed and ctx.rev():
1423 1424 copies = []
1424 1425 for fn in ctx.files():
1425 1426 rename = getrenamed(fn, ctx.rev())
1426 1427 if rename:
1427 1428 copies.append((fn, rename[0]))
1428 1429 revmatchfn = None
1429 1430 if filematcher is not None:
1430 1431 revmatchfn = filematcher(ctx.rev())
1431 1432 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
1432 1433 lines = displayer.hunk.pop(rev).split('\n')
1433 1434 if not lines[-1]:
1434 1435 del lines[-1]
1435 1436 displayer.flush(rev)
1436 1437 edges = edgefn(type, char, lines, seen, rev, parents)
1437 1438 for type, char, lines, coldata in edges:
1438 1439 graphmod.ascii(ui, state, type, char, lines, coldata)
1439 1440 displayer.close()
1440 1441
1441 1442 def graphlog(ui, repo, *pats, **opts):
1442 1443 # Parameters are identical to log command ones
1443 1444 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
1444 1445 revs = sorted(revs, reverse=1)
1445 1446 limit = loglimit(opts)
1446 1447 if limit is not None:
1447 1448 revs = revs[:limit]
1448 1449 revdag = graphmod.dagwalker(repo, revs)
1449 1450
1450 1451 getrenamed = None
1451 1452 if opts.get('copies'):
1452 1453 endrev = None
1453 1454 if opts.get('rev'):
1454 1455 endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
1455 1456 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
1456 1457 displayer = show_changeset(ui, repo, opts, buffered=True)
1457 1458 showparents = [ctx.node() for ctx in repo[None].parents()]
1458 1459 displaygraph(ui, revdag, displayer, showparents,
1459 1460 graphmod.asciiedges, getrenamed, filematcher)
1460 1461
1461 1462 def checkunsupportedgraphflags(pats, opts):
1462 1463 for op in ["newest_first"]:
1463 1464 if op in opts and opts[op]:
1464 1465 raise util.Abort(_("-G/--graph option is incompatible with --%s")
1465 1466 % op.replace("_", "-"))
1466 1467
1467 1468 def graphrevs(repo, nodes, opts):
1468 1469 limit = loglimit(opts)
1469 1470 nodes.reverse()
1470 1471 if limit is not None:
1471 1472 nodes = nodes[:limit]
1472 1473 return graphmod.nodes(repo, nodes)
1473 1474
1474 1475 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1475 1476 join = lambda f: os.path.join(prefix, f)
1476 1477 bad = []
1477 1478 oldbad = match.bad
1478 1479 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1479 1480 names = []
1480 1481 wctx = repo[None]
1481 1482 cca = None
1482 1483 abort, warn = scmutil.checkportabilityalert(ui)
1483 1484 if abort or warn:
1484 1485 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
1485 1486 for f in repo.walk(match):
1486 1487 exact = match.exact(f)
1487 1488 if exact or not explicitonly and f not in repo.dirstate:
1488 1489 if cca:
1489 1490 cca(f)
1490 1491 names.append(f)
1491 1492 if ui.verbose or not exact:
1492 1493 ui.status(_('adding %s\n') % match.rel(join(f)))
1493 1494
1494 1495 for subpath in wctx.substate:
1495 1496 sub = wctx.sub(subpath)
1496 1497 try:
1497 1498 submatch = matchmod.narrowmatcher(subpath, match)
1498 1499 if listsubrepos:
1499 1500 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1500 1501 False))
1501 1502 else:
1502 1503 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1503 1504 True))
1504 1505 except error.LookupError:
1505 1506 ui.status(_("skipping missing subrepository: %s\n")
1506 1507 % join(subpath))
1507 1508
1508 1509 if not dryrun:
1509 1510 rejected = wctx.add(names, prefix)
1510 1511 bad.extend(f for f in rejected if f in match.files())
1511 1512 return bad
1512 1513
1513 1514 def forget(ui, repo, match, prefix, explicitonly):
1514 1515 join = lambda f: os.path.join(prefix, f)
1515 1516 bad = []
1516 1517 oldbad = match.bad
1517 1518 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1518 1519 wctx = repo[None]
1519 1520 forgot = []
1520 1521 s = repo.status(match=match, clean=True)
1521 1522 forget = sorted(s[0] + s[1] + s[3] + s[6])
1522 1523 if explicitonly:
1523 1524 forget = [f for f in forget if match.exact(f)]
1524 1525
1525 1526 for subpath in wctx.substate:
1526 1527 sub = wctx.sub(subpath)
1527 1528 try:
1528 1529 submatch = matchmod.narrowmatcher(subpath, match)
1529 1530 subbad, subforgot = sub.forget(ui, submatch, prefix)
1530 1531 bad.extend([subpath + '/' + f for f in subbad])
1531 1532 forgot.extend([subpath + '/' + f for f in subforgot])
1532 1533 except error.LookupError:
1533 1534 ui.status(_("skipping missing subrepository: %s\n")
1534 1535 % join(subpath))
1535 1536
1536 1537 if not explicitonly:
1537 1538 for f in match.files():
1538 1539 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1539 1540 if f not in forgot:
1540 1541 if os.path.exists(match.rel(join(f))):
1541 1542 ui.warn(_('not removing %s: '
1542 1543 'file is already untracked\n')
1543 1544 % match.rel(join(f)))
1544 1545 bad.append(f)
1545 1546
1546 1547 for f in forget:
1547 1548 if ui.verbose or not match.exact(f):
1548 1549 ui.status(_('removing %s\n') % match.rel(join(f)))
1549 1550
1550 1551 rejected = wctx.forget(forget, prefix)
1551 1552 bad.extend(f for f in rejected if f in match.files())
1552 1553 forgot.extend(forget)
1553 1554 return bad, forgot
1554 1555
1555 1556 def duplicatecopies(repo, rev, p1):
1556 1557 "Reproduce copies found in the source revision in the dirstate for grafts"
1557 1558 for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems():
1558 1559 repo.dirstate.copy(src, dst)
1559 1560
1560 1561 def commit(ui, repo, commitfunc, pats, opts):
1561 1562 '''commit the specified files or all outstanding changes'''
1562 1563 date = opts.get('date')
1563 1564 if date:
1564 1565 opts['date'] = util.parsedate(date)
1565 1566 message = logmessage(ui, opts)
1566 1567
1567 1568 # extract addremove carefully -- this function can be called from a command
1568 1569 # that doesn't support addremove
1569 1570 if opts.get('addremove'):
1570 1571 scmutil.addremove(repo, pats, opts)
1571 1572
1572 1573 return commitfunc(ui, repo, message,
1573 1574 scmutil.match(repo[None], pats, opts), opts)
1574 1575
1575 1576 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1576 1577 ui.note(_('amending changeset %s\n') % old)
1577 1578 base = old.p1()
1578 1579
1579 1580 wlock = lock = None
1580 1581 try:
1581 1582 wlock = repo.wlock()
1582 1583 lock = repo.lock()
1583 1584 tr = repo.transaction('amend')
1584 1585 try:
1585 1586 # See if we got a message from -m or -l, if not, open the editor
1586 1587 # with the message of the changeset to amend
1587 1588 message = logmessage(ui, opts)
1588 1589 # First, do a regular commit to record all changes in the working
1589 1590 # directory (if there are any)
1590 1591 ui.callhooks = False
1591 1592 try:
1592 1593 opts['message'] = 'temporary amend commit for %s' % old
1593 1594 node = commit(ui, repo, commitfunc, pats, opts)
1594 1595 finally:
1595 1596 ui.callhooks = True
1596 1597 ctx = repo[node]
1597 1598
1598 1599 # Participating changesets:
1599 1600 #
1600 1601 # node/ctx o - new (intermediate) commit that contains changes
1601 1602 # | from working dir to go into amending commit
1602 1603 # | (or a workingctx if there were no changes)
1603 1604 # |
1604 1605 # old o - changeset to amend
1605 1606 # |
1606 1607 # base o - parent of amending changeset
1607 1608
1608 1609 # Update extra dict from amended commit (e.g. to preserve graft
1609 1610 # source)
1610 1611 extra.update(old.extra())
1611 1612
1612 1613 # Also update it from the intermediate commit or from the wctx
1613 1614 extra.update(ctx.extra())
1614 1615
1615 1616 files = set(old.files())
1616 1617
1617 1618 # Second, we use either the commit we just did or, if there were
1618 1619 # no changes, the parent of the working directory as the version
1619 1620 # of the files in the final amend commit
1620 1621 if node:
1621 1622 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1622 1623
1623 1624 user = ctx.user()
1624 1625 date = ctx.date()
1625 1626 # Recompute copies (avoid recording a -> b -> a)
1626 1627 copied = copies.pathcopies(base, ctx)
1627 1628
1628 1629 # Prune files which were reverted by the updates: if old
1629 1630 # introduced file X and our intermediate commit, node,
1630 1631 # renamed that file, then those two files are the same and
1631 1632 # we can discard X from our list of files. Likewise if X
1632 1633 # was deleted, it's no longer relevant
1633 1634 files.update(ctx.files())
1634 1635
1635 1636 def samefile(f):
1636 1637 if f in ctx.manifest():
1637 1638 a = ctx.filectx(f)
1638 1639 if f in base.manifest():
1639 1640 b = base.filectx(f)
1640 1641 return (not a.cmp(b)
1641 1642 and a.flags() == b.flags())
1642 1643 else:
1643 1644 return False
1644 1645 else:
1645 1646 return f not in base.manifest()
1646 1647 files = [f for f in files if not samefile(f)]
1647 1648
1648 1649 def filectxfn(repo, ctx_, path):
1649 1650 try:
1650 1651 fctx = ctx[path]
1651 1652 flags = fctx.flags()
1652 1653 mctx = context.memfilectx(fctx.path(), fctx.data(),
1653 1654 islink='l' in flags,
1654 1655 isexec='x' in flags,
1655 1656 copied=copied.get(path))
1656 1657 return mctx
1657 1658 except KeyError:
1658 1659 raise IOError
1659 1660 else:
1660 1661 ui.note(_('copying changeset %s to %s\n') % (old, base))
1661 1662
1662 1663 # Use version of files as in the old cset
1663 1664 def filectxfn(repo, ctx_, path):
1664 1665 try:
1665 1666 return old.filectx(path)
1666 1667 except KeyError:
1667 1668 raise IOError
1668 1669
1669 1670 user = opts.get('user') or old.user()
1670 1671 date = opts.get('date') or old.date()
1671 1672 if not message:
1672 1673 message = old.description()
1673 1674
1674 1675 new = context.memctx(repo,
1675 1676 parents=[base.node(), nullid],
1676 1677 text=message,
1677 1678 files=files,
1678 1679 filectxfn=filectxfn,
1679 1680 user=user,
1680 1681 date=date,
1681 1682 extra=extra)
1682 1683 new._text = commitforceeditor(repo, new, [])
1683 1684 ph = repo.ui.config('phases', 'new-commit', phases.draft)
1684 1685 try:
1685 1686 repo.ui.setconfig('phases', 'new-commit', old.phase())
1686 1687 newid = repo.commitctx(new)
1687 1688 finally:
1688 1689 repo.ui.setconfig('phases', 'new-commit', ph)
1689 1690 if newid != old.node():
1690 1691 # Reroute the working copy parent to the new changeset
1691 1692 repo.setparents(newid, nullid)
1692 1693
1693 1694 # Move bookmarks from old parent to amend commit
1694 1695 bms = repo.nodebookmarks(old.node())
1695 1696 if bms:
1696 1697 for bm in bms:
1697 1698 repo._bookmarks[bm] = newid
1698 1699 bookmarks.write(repo)
1699 1700 # commit the whole amend process
1700 1701 if obsolete._enabled and newid != old.node():
1701 1702 # mark the new changeset as successor of the rewritten one
1702 1703 new = repo[newid]
1703 1704 obs = [(old, (new,))]
1704 1705 if node:
1705 1706 obs.append((ctx, (new,)))
1706 1707
1707 1708 obsolete.createmarkers(repo, obs)
1708 1709 tr.close()
1709 1710 finally:
1710 1711 tr.release()
1711 1712 if (not obsolete._enabled) and newid != old.node():
1712 1713 # Strip the intermediate commit (if there was one) and the amended
1713 1714 # commit
1714 1715 if node:
1715 1716 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1716 1717 ui.note(_('stripping amended changeset %s\n') % old)
1717 1718 repair.strip(ui, repo, old.node(), topic='amend-backup')
1718 1719 finally:
1719 1720 lockmod.release(wlock, lock)
1720 1721 return newid
1721 1722
1722 1723 def commiteditor(repo, ctx, subs):
1723 1724 if ctx.description():
1724 1725 return ctx.description()
1725 1726 return commitforceeditor(repo, ctx, subs)
1726 1727
1727 1728 def commitforceeditor(repo, ctx, subs):
1728 1729 edittext = []
1729 1730 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1730 1731 if ctx.description():
1731 1732 edittext.append(ctx.description())
1732 1733 edittext.append("")
1733 1734 edittext.append("") # Empty line between message and comments.
1734 1735 edittext.append(_("HG: Enter commit message."
1735 1736 " Lines beginning with 'HG:' are removed."))
1736 1737 edittext.append(_("HG: Leave message empty to abort commit."))
1737 1738 edittext.append("HG: --")
1738 1739 edittext.append(_("HG: user: %s") % ctx.user())
1739 1740 if ctx.p2():
1740 1741 edittext.append(_("HG: branch merge"))
1741 1742 if ctx.branch():
1742 1743 edittext.append(_("HG: branch '%s'") % ctx.branch())
1743 1744 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1744 1745 edittext.extend([_("HG: added %s") % f for f in added])
1745 1746 edittext.extend([_("HG: changed %s") % f for f in modified])
1746 1747 edittext.extend([_("HG: removed %s") % f for f in removed])
1747 1748 if not added and not modified and not removed:
1748 1749 edittext.append(_("HG: no files changed"))
1749 1750 edittext.append("")
1750 1751 # run editor in the repository root
1751 1752 olddir = os.getcwd()
1752 1753 os.chdir(repo.root)
1753 1754 text = repo.ui.edit("\n".join(edittext), ctx.user())
1754 1755 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1755 1756 os.chdir(olddir)
1756 1757
1757 1758 if not text.strip():
1758 1759 raise util.Abort(_("empty commit message"))
1759 1760
1760 1761 return text
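# A standalone sketch of the 'HG:' stripping performed above (illustrative
# only; the helper name is invented):
def _demostriphglines():
    sample = "fix a bug\nHG: user: alice\nHG: branch 'default'\n"
    cleaned = re.sub("(?m)^HG:.*(\n|$)", "", sample)
    assert cleaned == "fix a bug\n"
    return cleaned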
1761 1762
1762 1763 def revert(ui, repo, ctx, parents, *pats, **opts):
1763 1764 parent, p2 = parents
1764 1765 node = ctx.node()
1765 1766
1766 1767 mf = ctx.manifest()
1767 1768 if node == parent:
1768 1769 pmf = mf
1769 1770 else:
1770 1771 pmf = None
1771 1772
1772 1773 # need all matching names in dirstate and manifest of target rev,
1773 1774 # so have to walk both. do not print errors if files exist in one
1774 1775 # but not the other.
1775 1776
1776 1777 names = {}
1777 1778
1778 1779 wlock = repo.wlock()
1779 1780 try:
1780 1781 # walk dirstate.
1781 1782
1782 1783 m = scmutil.match(repo[None], pats, opts)
1783 1784 m.bad = lambda x, y: False
1784 1785 for abs in repo.walk(m):
1785 1786 names[abs] = m.rel(abs), m.exact(abs)
1786 1787
1787 1788 # walk target manifest.
1788 1789
1789 1790 def badfn(path, msg):
1790 1791 if path in names:
1791 1792 return
1792 1793 if path in ctx.substate:
1793 1794 return
1794 1795 path_ = path + '/'
1795 1796 for f in names:
1796 1797 if f.startswith(path_):
1797 1798 return
1798 1799 ui.warn("%s: %s\n" % (m.rel(path), msg))
1799 1800
1800 1801 m = scmutil.match(ctx, pats, opts)
1801 1802 m.bad = badfn
1802 1803 for abs in ctx.walk(m):
1803 1804 if abs not in names:
1804 1805 names[abs] = m.rel(abs), m.exact(abs)
1805 1806
1806 1807 # get the list of subrepos that must be reverted
1807 1808 targetsubs = [s for s in ctx.substate if m(s)]
1808 1809 m = scmutil.matchfiles(repo, names)
1809 1810 changes = repo.status(match=m)[:4]
1810 1811 modified, added, removed, deleted = map(set, changes)
1811 1812
1812 1813 # if f is a rename, also revert the source
1813 1814 cwd = repo.getcwd()
1814 1815 for f in added:
1815 1816 src = repo.dirstate.copied(f)
1816 1817 if src and src not in names and repo.dirstate[src] == 'r':
1817 1818 removed.add(src)
1818 1819 names[src] = (repo.pathto(src, cwd), True)
1819 1820
1820 1821 def removeforget(abs):
1821 1822 if repo.dirstate[abs] == 'a':
1822 1823 return _('forgetting %s\n')
1823 1824 return _('removing %s\n')
1824 1825
1825 1826 revert = ([], _('reverting %s\n'))
1826 1827 add = ([], _('adding %s\n'))
1827 1828 remove = ([], removeforget)
1828 1829 undelete = ([], _('undeleting %s\n'))
1829 1830
1830 1831 disptable = (
1831 1832 # dispatch table:
1832 1833 # file state
1833 1834 # action if in target manifest
1834 1835 # action if not in target manifest
1835 1836 # make backup if in target manifest
1836 1837 # make backup if not in target manifest
1837 1838 (modified, revert, remove, True, True),
1838 1839 (added, revert, remove, True, False),
1839 1840 (removed, undelete, None, False, False),
1840 1841 (deleted, revert, remove, False, False),
1841 1842 )
1842 1843
1843 1844 for abs, (rel, exact) in sorted(names.items()):
1844 1845 mfentry = mf.get(abs)
1845 1846 target = repo.wjoin(abs)
1846 1847 def handle(xlist, dobackup):
1847 1848 xlist[0].append(abs)
1848 1849 if (dobackup and not opts.get('no_backup') and
1849 1850 os.path.lexists(target)):
1850 1851 bakname = "%s.orig" % rel
1851 1852 ui.note(_('saving current version of %s as %s\n') %
1852 1853 (rel, bakname))
1853 1854 if not opts.get('dry_run'):
1854 1855 util.rename(target, bakname)
1855 1856 if ui.verbose or not exact:
1856 1857 msg = xlist[1]
1857 1858 if not isinstance(msg, basestring):
1858 1859 msg = msg(abs)
1859 1860 ui.status(msg % rel)
1860 1861 for table, hitlist, misslist, backuphit, backupmiss in disptable:
1861 1862 if abs not in table:
1862 1863 continue
1863 1864 # file has changed in dirstate
1864 1865 if mfentry:
1865 1866 handle(hitlist, backuphit)
1866 1867 elif misslist is not None:
1867 1868 handle(misslist, backupmiss)
1868 1869 break
1869 1870 else:
1870 1871 if abs not in repo.dirstate:
1871 1872 if mfentry:
1872 1873 handle(add, True)
1873 1874 elif exact:
1874 1875 ui.warn(_('file not managed: %s\n') % rel)
1875 1876 continue
1876 1877 # file has not changed in dirstate
1877 1878 if node == parent:
1878 1879 if exact:
1879 1880 ui.warn(_('no changes needed to %s\n') % rel)
1880 1881 continue
1881 1882 if pmf is None:
1882 1883 # only need parent manifest in this unlikely case,
1883 1884 # so do not read by default
1884 1885 pmf = repo[parent].manifest()
1885 1886 if abs in pmf and mfentry:
1886 1887 # if version of file is same in parent and target
1887 1888 # manifests, do nothing
1888 1889 if (pmf[abs] != mfentry or
1889 1890 pmf.flags(abs) != mf.flags(abs)):
1890 1891 handle(revert, False)
1891 1892 else:
1892 1893 handle(remove, False)
1893 1894
1894 1895 if not opts.get('dry_run'):
1895 1896 def checkout(f):
1896 1897 fc = ctx[f]
1897 1898 repo.wwrite(f, fc.data(), fc.flags())
1898 1899
1899 1900 audit_path = scmutil.pathauditor(repo.root)
1900 1901 for f in remove[0]:
1901 1902 if repo.dirstate[f] == 'a':
1902 1903 repo.dirstate.drop(f)
1903 1904 continue
1904 1905 audit_path(f)
1905 1906 try:
1906 1907 util.unlinkpath(repo.wjoin(f))
1907 1908 except OSError:
1908 1909 pass
1909 1910 repo.dirstate.remove(f)
1910 1911
1911 1912 normal = None
1912 1913 if node == parent:
1913 1914 # We're reverting to our parent. If possible, we'd like status
1914 1915 # to report the file as clean. We have to use normallookup for
1915 1916 # merges to avoid losing information about merged/dirty files.
1916 1917 if p2 != nullid:
1917 1918 normal = repo.dirstate.normallookup
1918 1919 else:
1919 1920 normal = repo.dirstate.normal
1920 1921 for f in revert[0]:
1921 1922 checkout(f)
1922 1923 if normal:
1923 1924 normal(f)
1924 1925
1925 1926 for f in add[0]:
1926 1927 checkout(f)
1927 1928 repo.dirstate.add(f)
1928 1929
1929 1930 normal = repo.dirstate.normallookup
1930 1931 if node == parent and p2 == nullid:
1931 1932 normal = repo.dirstate.normal
1932 1933 for f in undelete[0]:
1933 1934 checkout(f)
1934 1935 normal(f)
1935 1936
1936 1937 if targetsubs:
1937 1938 # Revert the subrepos on the revert list
1938 1939 for sub in targetsubs:
1939 1940 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
1940 1941 finally:
1941 1942 wlock.release()
1942 1943
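# Reading the dispatch table in revert() above, as a worked example
# (sketch): a file in the 'modified' set that still exists in the target
# manifest goes on the revert list with a .orig backup; one that is gone
# from the target manifest goes on the remove list instead, also backed up.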
1943 1944 def command(table):
1944 1945 '''returns a function object bound to table which can be used as
1945 1946 a decorator for populating table as a command table'''
1946 1947
1947 1948 def cmd(name, options, synopsis=None):
1948 1949 def decorator(func):
1949 1950 if synopsis:
1950 1951 table[name] = func, options[:], synopsis
1951 1952 else:
1952 1953 table[name] = func, options[:]
1953 1954 return func
1954 1955 return decorator
1955 1956
1956 1957 return cmd
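# Typical use of the factory above, as it would appear in an extension
# module (a sketch; the 'hello' command and its option are invented):
cmdtable = {}
command = command(cmdtable)

@command('hello', [('g', 'greeting', 'Hello', _('greeting to use'))],
         '[-g TEXT]')
def hello(ui, repo, **opts):
    # ui and repo are supplied by the dispatcher at run time
    ui.write("%s, %s\n" % (opts['greeting'], repo.root))
# cmdtable now maps 'hello' to (hello, options, synopsis)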
@@ -1,2610 +1,2609
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
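# A minimal sketch of driving the peer API above (the helper name is
# invented; assumes an existing localrepository):
def _demolocalpeer(repo):
    peer = repo.peer()             # constructed fresh each call, see below
    if peer.canpush():
        tip = peer.lookup('tip')   # delegates straight to the local repo
        return peer.known([tip])   # [True] unless tip is secret/unknown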
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wvfs = scmutil.vfs(path, expand=True)
121 121 self.wopener = self.wvfs
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.vfs = scmutil.vfs(self.path)
127 127 self.opener = self.vfs
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phase if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 185 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
186 186 self.spath = self.store.path
187 187 self.svfs = self.store.vfs
188 188 self.sopener = self.svfs
189 189 self.sjoin = self.store.join
190 190 self.vfs.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
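# Worked example (sketch): with root '/r' and a working copy whose
# substate contains 'sub', _checknested('/r/sub/x') computes subpath
# 'sub/x', finds the prefix 'sub' in ctx.substate, and delegates the
# remainder to ctx.sub('sub').checknested('x').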
262 262
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 if store and not obsolete._enabled:
293 293 # message is rare enough to not be translated
294 294 msg = 'obsolete feature not enabled but %i markers found!\n'
295 295 self.ui.warn(msg % len(list(store)))
296 296 return store
297 297
298 298 @propertycache
299 299 def hiddenrevs(self):
300 300 """hiddenrevs: revs that should be hidden by command and tools
301 301
302 302 This set is carried on the repo to ease initialization and lazy
303 303 loading; it'll probably move back to changelog for efficiency and
304 304 consistency reasons.
305 305
306 306 Note that the hiddenrevs set will need invalidation when
307 307 - a new changeset is added (possibly unstable above extinct)
308 308 - a new obsolete marker is added (possibly a new extinct changeset)
309 309
310 310 hidden changesets cannot have non-hidden descendants
311 311 """
312 312 hidden = set()
313 313 if self.obsstore:
314 314 ### hide extinct changeset that are not accessible by any mean
315 315 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
316 316 hidden.update(self.revs(hiddenquery))
317 317 return hidden
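# Reading the revset above (sketch): extinct() selects obsolete changesets
# with no non-obsolete descendants, and subtracting
# '::(. + bookmark() + tagged())' keeps anything still reachable from the
# working parent, a bookmark, or a tag visible.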
318 318
319 319 @storecache('00changelog.i')
320 320 def changelog(self):
321 321 c = changelog.changelog(self.sopener)
322 322 if 'HG_PENDING' in os.environ:
323 323 p = os.environ['HG_PENDING']
324 324 if p.startswith(self.root):
325 325 c.readpending('00changelog.i.a')
326 326 return c
327 327
328 328 @storecache('00manifest.i')
329 329 def manifest(self):
330 330 return manifest.manifest(self.sopener)
331 331
332 332 @filecache('dirstate')
333 333 def dirstate(self):
334 334 warned = [0]
335 335 def validate(node):
336 336 try:
337 337 self.changelog.rev(node)
338 338 return node
339 339 except error.LookupError:
340 340 if not warned[0]:
341 341 warned[0] = True
342 342 self.ui.warn(_("warning: ignoring unknown"
343 343 " working parent %s!\n") % short(node))
344 344 return nullid
345 345
346 346 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
347 347
348 348 def __getitem__(self, changeid):
349 349 if changeid is None:
350 350 return context.workingctx(self)
351 351 return context.changectx(self, changeid)
352 352
353 353 def __contains__(self, changeid):
354 354 try:
355 355 return bool(self.lookup(changeid))
356 356 except error.RepoLookupError:
357 357 return False
358 358
359 359 def __nonzero__(self):
360 360 return True
361 361
362 362 def __len__(self):
363 363 return len(self.changelog)
364 364
365 365 def __iter__(self):
366 for i in xrange(len(self)):
367 yield i
366 return iter(self.changelog)
368 367
369 368 def revs(self, expr, *args):
370 369 '''Return a list of revisions matching the given revset'''
371 370 expr = revset.formatspec(expr, *args)
372 371 m = revset.match(None, expr)
373 return [r for r in m(self, range(len(self)))]
372 return [r for r in m(self, list(self))]
374 373
375 374 def set(self, expr, *args):
376 375 '''
377 376 Yield a context for each matching revision, after doing arg
378 377 replacement via revset.formatspec
379 378 '''
380 379 for r in self.revs(expr, *args):
381 380 yield self[r]
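# The two changes above are the point of this series (sketch of the
# effect; the helper name is invented): iteration is delegated to the
# changelog, so a filtering changelog can hide revisions from __iter__
# and revs() instead of every index in range(len(repo)) being produced
# unconditionally:
def _demoiteration(repo):
    seen = [r for r in repo]           # iter(repo.changelog) under the hood
    return seen == repo.revs('all()')  # both honour changelog filtering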
382 381
383 382 def url(self):
384 383 return 'file:' + self.root
385 384
386 385 def hook(self, name, throw=False, **args):
387 386 return hook.hook(self.ui, self, name, throw, **args)
388 387
389 388 tag_disallowed = ':\r\n'
390 389
391 390 def _tag(self, names, node, message, local, user, date, extra={}):
392 391 if isinstance(names, str):
393 392 allchars = names
394 393 names = (names,)
395 394 else:
396 395 allchars = ''.join(names)
397 396 for c in self.tag_disallowed:
398 397 if c in allchars:
399 398 raise util.Abort(_('%r cannot be used in a tag name') % c)
400 399
401 400 branches = self.branchmap()
402 401 for name in names:
403 402 self.hook('pretag', throw=True, node=hex(node), tag=name,
404 403 local=local)
405 404 if name in branches:
406 405 self.ui.warn(_("warning: tag %s conflicts with existing"
407 406 " branch name\n") % name)
408 407
409 408 def writetags(fp, names, munge, prevtags):
410 409 fp.seek(0, 2)
411 410 if prevtags and prevtags[-1] != '\n':
412 411 fp.write('\n')
413 412 for name in names:
414 413 m = munge and munge(name) or name
415 414 if (self._tagscache.tagtypes and
416 415 name in self._tagscache.tagtypes):
417 416 old = self.tags().get(name, nullid)
418 417 fp.write('%s %s\n' % (hex(old), m))
419 418 fp.write('%s %s\n' % (hex(node), m))
420 419 fp.close()
421 420
422 421 prevtags = ''
423 422 if local:
424 423 try:
425 424 fp = self.opener('localtags', 'r+')
426 425 except IOError:
427 426 fp = self.opener('localtags', 'a')
428 427 else:
429 428 prevtags = fp.read()
430 429
431 430 # local tags are stored in the current charset
432 431 writetags(fp, names, None, prevtags)
433 432 for name in names:
434 433 self.hook('tag', node=hex(node), tag=name, local=local)
435 434 return
436 435
437 436 try:
438 437 fp = self.wfile('.hgtags', 'rb+')
439 438 except IOError, e:
440 439 if e.errno != errno.ENOENT:
441 440 raise
442 441 fp = self.wfile('.hgtags', 'ab')
443 442 else:
444 443 prevtags = fp.read()
445 444
446 445 # committed tags are stored in UTF-8
447 446 writetags(fp, names, encoding.fromlocal, prevtags)
448 447
449 448 fp.close()
450 449
451 450 self.invalidatecaches()
452 451
453 452 if '.hgtags' not in self.dirstate:
454 453 self[None].add(['.hgtags'])
455 454
456 455 m = matchmod.exact(self.root, '', ['.hgtags'])
457 456 tagnode = self.commit(message, user, date, extra=extra, match=m)
458 457
459 458 for name in names:
460 459 self.hook('tag', node=hex(node), tag=name, local=local)
461 460
462 461 return tagnode
463 462
464 463 def tag(self, names, node, message, local, user, date):
465 464 '''tag a revision with one or more symbolic names.
466 465
467 466 names is a list of strings or, when adding a single tag, names may be a
468 467 string.
469 468
470 469 if local is True, the tags are stored in a per-repository file.
471 470 otherwise, they are stored in the .hgtags file, and a new
472 471 changeset is committed with the change.
473 472
474 473 keyword arguments:
475 474
476 475 local: whether to store tags in a non-version-controlled file
477 476 (default False)
478 477
479 478 message: commit message to use if committing
480 479
481 480 user: name of user to use if committing
482 481
483 482 date: date tuple to use if committing'''
484 483
485 484 if not local:
486 485 for x in self.status()[:5]:
487 486 if '.hgtags' in x:
488 487 raise util.Abort(_('working copy of .hgtags is changed '
489 488 '(please commit .hgtags manually)'))
490 489
491 490 self.tags() # instantiate the cache
492 491 self._tag(names, node, message, local, user, date)
493 492
494 493 @propertycache
495 494 def _tagscache(self):
496 495 '''Returns a tagscache object that contains various tags related
497 496 caches.'''
498 497
499 498 # This simplifies its cache management by having one decorated
500 499 # function (this one) and the rest simply fetch things from it.
501 500 class tagscache(object):
502 501 def __init__(self):
503 502 # These two define the set of tags for this repository. tags
504 503 # maps tag name to node; tagtypes maps tag name to 'global' or
505 504 # 'local'. (Global tags are defined by .hgtags across all
506 505 # heads, and local tags are defined in .hg/localtags.)
507 506 # They constitute the in-memory cache of tags.
508 507 self.tags = self.tagtypes = None
509 508
510 509 self.nodetagscache = self.tagslist = None
511 510
512 511 cache = tagscache()
513 512 cache.tags, cache.tagtypes = self._findtags()
514 513
515 514 return cache
516 515
517 516 def tags(self):
518 517 '''return a mapping of tag to node'''
519 518 t = {}
520 519 for k, v in self._tagscache.tags.iteritems():
521 520 try:
522 521 # ignore tags to unknown nodes
523 522 self.changelog.rev(v)
524 523 t[k] = v
525 524 except (error.LookupError, ValueError):
526 525 pass
527 526 return t
528 527
529 528 def _findtags(self):
530 529 '''Do the hard work of finding tags. Return a pair of dicts
531 530 (tags, tagtypes) where tags maps tag name to node, and tagtypes
532 531 maps tag name to a string like \'global\' or \'local\'.
533 532 Subclasses or extensions are free to add their own tags, but
534 533 should be aware that the returned dicts will be retained for the
535 534 duration of the localrepo object.'''
536 535
537 536 # XXX what tagtype should subclasses/extensions use? Currently
538 537 # mq and bookmarks add tags, but do not set the tagtype at all.
539 538 # Should each extension invent its own tag type? Should there
540 539 # be one tagtype for all such "virtual" tags? Or is the status
541 540 # quo fine?
542 541
543 542 alltags = {} # map tag name to (node, hist)
544 543 tagtypes = {}
545 544
546 545 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
547 546 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
548 547
549 548 # Build the return dicts. Have to re-encode tag names because
550 549 # the tags module always uses UTF-8 (in order not to lose info
551 550 # writing to the cache), but the rest of Mercurial wants them in
552 551 # local encoding.
553 552 tags = {}
554 553 for (name, (node, hist)) in alltags.iteritems():
555 554 if node != nullid:
556 555 tags[encoding.tolocal(name)] = node
557 556 tags['tip'] = self.changelog.tip()
558 557 tagtypes = dict([(encoding.tolocal(name), value)
559 558 for (name, value) in tagtypes.iteritems()])
560 559 return (tags, tagtypes)
561 560
562 561 def tagtype(self, tagname):
563 562 '''
564 563 return the type of the given tag. result can be:
565 564
566 565 'local' : a local tag
567 566 'global' : a global tag
568 567 None : tag does not exist
569 568 '''
570 569
571 570 return self._tagscache.tagtypes.get(tagname)
572 571
573 572 def tagslist(self):
574 573 '''return a list of tags ordered by revision'''
575 574 if not self._tagscache.tagslist:
576 575 l = []
577 576 for t, n in self.tags().iteritems():
578 577 r = self.changelog.rev(n)
579 578 l.append((r, t, n))
580 579 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
581 580
582 581 return self._tagscache.tagslist
583 582
584 583 def nodetags(self, node):
585 584 '''return the tags associated with a node'''
586 585 if not self._tagscache.nodetagscache:
587 586 nodetagscache = {}
588 587 for t, n in self._tagscache.tags.iteritems():
589 588 nodetagscache.setdefault(n, []).append(t)
590 589 for tags in nodetagscache.itervalues():
591 590 tags.sort()
592 591 self._tagscache.nodetagscache = nodetagscache
593 592 return self._tagscache.nodetagscache.get(node, [])
594 593
595 594 def nodebookmarks(self, node):
596 595 marks = []
597 596 for bookmark, n in self._bookmarks.iteritems():
598 597 if n == node:
599 598 marks.append(bookmark)
600 599 return sorted(marks)
601 600
602 601 def _branchtags(self, partial, lrev):
603 602 # TODO: rename this function?
604 603 tiprev = len(self) - 1
605 604 if lrev != tiprev:
606 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
605 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
607 606 self._updatebranchcache(partial, ctxgen)
608 607 self._writebranchcache(partial, self.changelog.tip(), tiprev)
609 608
610 609 return partial
611 610
612 611 def updatebranchcache(self):
613 612 tip = self.changelog.tip()
614 613 if self._branchcache is not None and self._branchcachetip == tip:
615 614 return
616 615
617 616 oldtip = self._branchcachetip
618 617 self._branchcachetip = tip
619 618 if oldtip is None or oldtip not in self.changelog.nodemap:
620 619 partial, last, lrev = self._readbranchcache()
621 620 else:
622 621 lrev = self.changelog.rev(oldtip)
623 622 partial = self._branchcache
624 623
625 624 self._branchtags(partial, lrev)
626 625 # this private cache holds all heads (not just the branch tips)
627 626 self._branchcache = partial
628 627
629 628 def branchmap(self):
630 629 '''returns a dictionary {branch: [branchheads]}'''
631 630 self.updatebranchcache()
632 631 return self._branchcache
633 632
634 633 def _branchtip(self, heads):
635 634 '''return the tipmost branch head in heads'''
636 635 tip = heads[-1]
637 636 for h in reversed(heads):
638 637 if not self[h].closesbranch():
639 638 tip = h
640 639 break
641 640 return tip
642 641
643 642 def branchtip(self, branch):
644 643 '''return the tip node for a given branch'''
645 644 if branch not in self.branchmap():
646 645 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
647 646 return self._branchtip(self.branchmap()[branch])
648 647
649 648 def branchtags(self):
650 649 '''return a dict where branch names map to the tipmost head of
651 650 the branch, open heads come before closed'''
652 651 bt = {}
653 652 for bn, heads in self.branchmap().iteritems():
654 653 bt[bn] = self._branchtip(heads)
655 654 return bt
656 655
657 656 def _readbranchcache(self):
658 657 partial = {}
659 658 try:
660 659 f = self.opener("cache/branchheads")
661 660 lines = f.read().split('\n')
662 661 f.close()
663 662 except (IOError, OSError):
664 663 return {}, nullid, nullrev
665 664
666 665 try:
667 666 last, lrev = lines.pop(0).split(" ", 1)
668 667 last, lrev = bin(last), int(lrev)
669 668 if lrev >= len(self) or self[lrev].node() != last:
670 669 # invalidate the cache
671 670 raise ValueError('invalidating branch cache (tip differs)')
672 671 for l in lines:
673 672 if not l:
674 673 continue
675 674 node, label = l.split(" ", 1)
676 675 label = encoding.tolocal(label.strip())
677 676 if node not in self:
678 677 raise ValueError('invalidating branch cache because node '+
679 678 '%s does not exist' % node)
680 679 partial.setdefault(label, []).append(bin(node))
681 680 except KeyboardInterrupt:
682 681 raise
683 682 except Exception, inst:
684 683 if self.ui.debugflag:
685 684 self.ui.warn(str(inst), '\n')
686 685 partial, last, lrev = {}, nullid, nullrev
687 686 return partial, last, lrev
688 687
689 688 def _writebranchcache(self, branches, tip, tiprev):
690 689 try:
691 690 f = self.opener("cache/branchheads", "w", atomictemp=True)
692 691 f.write("%s %s\n" % (hex(tip), tiprev))
693 692 for label, nodes in branches.iteritems():
694 693 for node in nodes:
695 694 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
696 695 f.close()
697 696 except (IOError, OSError):
698 697 pass
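# On disk (sketch), the cache/branchheads file written above looks like:
#
#     <tip-hex> <tip-rev>
#     <head-hex> default
#     <head-hex> stable
#
# which is exactly the shape _readbranchcache() parses back.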
699 698
700 699 def _updatebranchcache(self, partial, ctxgen):
701 700 """Given a branchhead cache, partial, that may have extra nodes or be
702 701 missing heads, and a generator of nodes that are at least a superset of
703 702 the missing heads, this function updates partial to be correct.
704 703 """
705 704 # collect new branch entries
706 705 newbranches = {}
707 706 for c in ctxgen:
708 707 newbranches.setdefault(c.branch(), []).append(c.node())
709 708 # if older branchheads are reachable from new ones, they aren't
710 709 # really branchheads. Note checking parents is insufficient:
711 710 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
712 711 for branch, newnodes in newbranches.iteritems():
713 712 bheads = partial.setdefault(branch, [])
714 713 # Remove candidate heads that no longer are in the repo (e.g., as
715 714 # the result of a strip that just happened). Avoid using 'node in
716 715 # self' here because that dives down into branchcache code somewhat
717 716 # recursively.
718 717 bheadrevs = [self.changelog.rev(node) for node in bheads
719 718 if self.changelog.hasnode(node)]
720 719 newheadrevs = [self.changelog.rev(node) for node in newnodes
721 720 if self.changelog.hasnode(node)]
722 721 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
723 722 # Remove duplicates - nodes that are in newheadrevs and are already
724 723 # in bheadrevs. This can happen if you strip a node whose parent
725 724 # was already a head (because they're on different branches).
726 725 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
727 726
728 727 # Starting from tip means fewer passes over reachable. If we know
729 728 # the new candidates are not ancestors of existing heads, we don't
730 729 # have to examine ancestors of existing heads
731 730 if ctxisnew:
732 731 iterrevs = sorted(newheadrevs)
733 732 else:
734 733 iterrevs = list(bheadrevs)
735 734
736 735 # This loop prunes out two kinds of heads - heads that are
737 736 # superseded by a head in newheadrevs, and newheadrevs that are not
738 737 # heads because an existing head is their descendant.
739 738 while iterrevs:
740 739 latest = iterrevs.pop()
741 740 if latest not in bheadrevs:
742 741 continue
743 742 ancestors = set(self.changelog.ancestors([latest],
744 743 bheadrevs[0]))
745 744 if ancestors:
746 745 bheadrevs = [b for b in bheadrevs if b not in ancestors]
747 746 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
748 747
749 748 # There may be branches that cease to exist when the last commit in the
750 749 # branch was stripped. This code filters them out. Note that the
751 750 # branch that ceased to exist may not be in newbranches because
752 751 # newbranches is the set of candidate heads, which when you strip the
753 752 # last commit in a branch will be the parent branch.
754 753 for branch in partial.keys():
755 754 nodes = [head for head in partial[branch]
756 755 if self.changelog.hasnode(head)]
757 756 if not nodes:
758 757 del partial[branch]
759 758
760 759 def lookup(self, key):
761 760 return self[key].node()
762 761
763 762 def lookupbranch(self, key, remote=None):
764 763 repo = remote or self
765 764 if key in repo.branchmap():
766 765 return key
767 766
768 767 repo = (remote and remote.local()) and remote or self
769 768 return repo[key].branch()
770 769
771 770 def known(self, nodes):
772 771 nm = self.changelog.nodemap
773 772 pc = self._phasecache
774 773 result = []
775 774 for n in nodes:
776 775 r = nm.get(n)
777 776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
778 777 result.append(resp)
779 778 return result
780 779
781 780 def local(self):
782 781 return self
783 782
784 783 def cancopy(self):
785 784 return self.local() # so statichttprepo's override of local() works
786 785
787 786 def join(self, f):
788 787 return os.path.join(self.path, f)
789 788
790 789 def wjoin(self, f):
791 790 return os.path.join(self.root, f)
792 791
793 792 def file(self, f):
794 793 if f[0] == '/':
795 794 f = f[1:]
796 795 return filelog.filelog(self.sopener, f)
797 796
798 797 def changectx(self, changeid):
799 798 return self[changeid]
800 799
801 800 def parents(self, changeid=None):
802 801 '''get list of changectxs for parents of changeid'''
803 802 return self[changeid].parents()
804 803
805 804 def setparents(self, p1, p2=nullid):
806 805 copies = self.dirstate.setparents(p1, p2)
807 806 if copies:
808 807 # Adjust copy records, the dirstate cannot do it, it
809 808 # requires access to parents manifests. Preserve them
810 809 # only for entries added to first parent.
811 810 pctx = self[p1]
812 811 for f in copies:
813 812 if f not in pctx and copies[f] in pctx:
814 813 self.dirstate.copy(copies[f], f)
815 814
816 815 def filectx(self, path, changeid=None, fileid=None):
817 816 """changeid can be a changeset revision, node, or tag.
818 817 fileid can be a file revision or node."""
819 818 return context.filectx(self, path, changeid, fileid)
820 819
821 820 def getcwd(self):
822 821 return self.dirstate.getcwd()
823 822
824 823 def pathto(self, f, cwd=None):
825 824 return self.dirstate.pathto(f, cwd)
826 825
827 826 def wfile(self, f, mode='r'):
828 827 return self.wopener(f, mode)
829 828
830 829 def _link(self, f):
831 830 return os.path.islink(self.wjoin(f))
832 831
833 832 def _loadfilter(self, filter):
834 833 if filter not in self.filterpats:
835 834 l = []
836 835 for pat, cmd in self.ui.configitems(filter):
837 836 if cmd == '!':
838 837 continue
839 838 mf = matchmod.match(self.root, '', [pat])
840 839 fn = None
841 840 params = cmd
842 841 for name, filterfn in self._datafilters.iteritems():
843 842 if cmd.startswith(name):
844 843 fn = filterfn
845 844 params = cmd[len(name):].lstrip()
846 845 break
847 846 if not fn:
848 847 fn = lambda s, c, **kwargs: util.filter(s, c)
849 848 # Wrap old filters not supporting keyword arguments
850 849 if not inspect.getargspec(fn)[2]:
851 850 oldfn = fn
852 851 fn = lambda s, c, **kwargs: oldfn(s, c)
853 852 l.append((mf, fn, params))
854 853 self.filterpats[filter] = l
855 854 return self.filterpats[filter]
856 855
857 856 def _filter(self, filterpats, filename, data):
858 857 for mf, fn, cmd in filterpats:
859 858 if mf(filename):
860 859 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
861 860 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
862 861 break
863 862
864 863 return data
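# The patterns fed to _loadfilter() above come from the [encode]/[decode]
# hgrc sections, e.g. (sketch):
#
#     [encode]
#     **.txt = unix2dos
#     [decode]
#     **.txt = dos2unix
#
# With no named data filter matching the command, the fallback lambda
# pipes file data through the shell command via util.filter().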
865 864
866 865 @propertycache
867 866 def _encodefilterpats(self):
868 867 return self._loadfilter('encode')
869 868
870 869 @propertycache
871 870 def _decodefilterpats(self):
872 871 return self._loadfilter('decode')
873 872
874 873 def adddatafilter(self, name, filter):
875 874 self._datafilters[name] = filter
876 875
877 876 def wread(self, filename):
878 877 if self._link(filename):
879 878 data = os.readlink(self.wjoin(filename))
880 879 else:
881 880 data = self.wopener.read(filename)
882 881 return self._filter(self._encodefilterpats, filename, data)
883 882
884 883 def wwrite(self, filename, data, flags):
885 884 data = self._filter(self._decodefilterpats, filename, data)
886 885 if 'l' in flags:
887 886 self.wopener.symlink(data, filename)
888 887 else:
889 888 self.wopener.write(filename, data)
890 889 if 'x' in flags:
891 890 util.setflags(self.wjoin(filename), False, True)
892 891
893 892 def wwritedata(self, filename, data):
894 893 return self._filter(self._decodefilterpats, filename, data)
895 894
896 895 def transaction(self, desc):
897 896 tr = self._transref and self._transref() or None
898 897 if tr and tr.running():
899 898 return tr.nest()
900 899
901 900 # abort here if the journal already exists
902 901 if os.path.exists(self.sjoin("journal")):
903 902 raise error.RepoError(
904 903 _("abandoned transaction found - run hg recover"))
905 904
906 905 self._writejournal(desc)
907 906 renames = [(x, undoname(x)) for x in self._journalfiles()]
908 907
909 908 tr = transaction.transaction(self.ui.warn, self.sopener,
910 909 self.sjoin("journal"),
911 910 aftertrans(renames),
912 911 self.store.createmode)
913 912 self._transref = weakref.ref(tr)
914 913 return tr
915 914
916 915 def _journalfiles(self):
917 916 return (self.sjoin('journal'), self.join('journal.dirstate'),
918 917 self.join('journal.branch'), self.join('journal.desc'),
919 918 self.join('journal.bookmarks'),
920 919 self.sjoin('journal.phaseroots'))
921 920
922 921 def undofiles(self):
923 922 return [undoname(x) for x in self._journalfiles()]
924 923
925 924 def _writejournal(self, desc):
926 925 self.opener.write("journal.dirstate",
927 926 self.opener.tryread("dirstate"))
928 927 self.opener.write("journal.branch",
929 928 encoding.fromlocal(self.dirstate.branch()))
930 929 self.opener.write("journal.desc",
931 930 "%d\n%s\n" % (len(self), desc))
932 931 self.opener.write("journal.bookmarks",
933 932 self.opener.tryread("bookmarks"))
934 933 self.sopener.write("journal.phaseroots",
935 934 self.sopener.tryread("phaseroots"))
936 935
937 936 def recover(self):
938 937 lock = self.lock()
939 938 try:
940 939 if os.path.exists(self.sjoin("journal")):
941 940 self.ui.status(_("rolling back interrupted transaction\n"))
942 941 transaction.rollback(self.sopener, self.sjoin("journal"),
943 942 self.ui.warn)
944 943 self.invalidate()
945 944 return True
946 945 else:
947 946 self.ui.warn(_("no interrupted transaction available\n"))
948 947 return False
949 948 finally:
950 949 lock.release()
951 950
952 951 def rollback(self, dryrun=False, force=False):
953 952 wlock = lock = None
954 953 try:
955 954 wlock = self.wlock()
956 955 lock = self.lock()
957 956 if os.path.exists(self.sjoin("undo")):
958 957 return self._rollback(dryrun, force)
959 958 else:
960 959 self.ui.warn(_("no rollback information available\n"))
961 960 return 1
962 961 finally:
963 962 release(lock, wlock)
964 963
965 964 def _rollback(self, dryrun, force):
966 965 ui = self.ui
967 966 try:
968 967 args = self.opener.read('undo.desc').splitlines()
969 968 (oldlen, desc, detail) = (int(args[0]), args[1], None)
970 969 if len(args) >= 3:
971 970 detail = args[2]
972 971 oldtip = oldlen - 1
973 972
974 973 if detail and ui.verbose:
975 974 msg = (_('repository tip rolled back to revision %s'
976 975 ' (undo %s: %s)\n')
977 976 % (oldtip, desc, detail))
978 977 else:
979 978 msg = (_('repository tip rolled back to revision %s'
980 979 ' (undo %s)\n')
981 980 % (oldtip, desc))
982 981 except IOError:
983 982 msg = _('rolling back unknown transaction\n')
984 983 desc = None
985 984
986 985 if not force and self['.'] != self['tip'] and desc == 'commit':
987 986 raise util.Abort(
988 987 _('rollback of last commit while not checked out '
989 988 'may lose data'), hint=_('use -f to force'))
990 989
991 990 ui.status(msg)
992 991 if dryrun:
993 992 return 0
994 993
995 994 parents = self.dirstate.parents()
996 995 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
997 996 if os.path.exists(self.join('undo.bookmarks')):
998 997 util.rename(self.join('undo.bookmarks'),
999 998 self.join('bookmarks'))
1000 999 if os.path.exists(self.sjoin('undo.phaseroots')):
1001 1000 util.rename(self.sjoin('undo.phaseroots'),
1002 1001 self.sjoin('phaseroots'))
1003 1002 self.invalidate()
1004 1003
1005 1004 # Discard all cache entries to force reloading everything.
1006 1005 self._filecache.clear()
1007 1006
1008 1007 parentgone = (parents[0] not in self.changelog.nodemap or
1009 1008 parents[1] not in self.changelog.nodemap)
1010 1009 if parentgone:
1011 1010 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1012 1011 try:
1013 1012 branch = self.opener.read('undo.branch')
1014 1013 self.dirstate.setbranch(encoding.tolocal(branch))
1015 1014 except IOError:
1016 1015 ui.warn(_('named branch could not be reset: '
1017 1016 'current branch is still \'%s\'\n')
1018 1017 % self.dirstate.branch())
1019 1018
1020 1019 self.dirstate.invalidate()
1021 1020 parents = tuple([p.rev() for p in self.parents()])
1022 1021 if len(parents) > 1:
1023 1022 ui.status(_('working directory now based on '
1024 1023 'revisions %d and %d\n') % parents)
1025 1024 else:
1026 1025 ui.status(_('working directory now based on '
1027 1026 'revision %d\n') % parents)
1028 1027 # TODO: if we know which new heads may result from this rollback, pass
1029 1028 # them to destroy(), which will prevent the branchhead cache from being
1030 1029 # invalidated.
1031 1030 self.destroyed()
1032 1031 return 0
1033 1032
1034 1033 def invalidatecaches(self):
1035 1034 def delcache(name):
1036 1035 try:
1037 1036 delattr(self, name)
1038 1037 except AttributeError:
1039 1038 pass
1040 1039
1041 1040 delcache('_tagscache')
1042 1041
1043 1042 self._branchcache = None # in UTF-8
1044 1043 self._branchcachetip = None
1045 1044 obsolete.clearobscaches(self)
1046 1045
1047 1046 def invalidatedirstate(self):
1048 1047 '''Invalidates the dirstate, causing the next call to dirstate
1049 1048 to check if it was modified since the last time it was read,
1050 1049 rereading it if it has.
1051 1050
1052 1051 This is different from dirstate.invalidate() in that it doesn't
1053 1052 always reread the dirstate. Use dirstate.invalidate() if you want to
1054 1053 explicitly read the dirstate again (i.e. restoring it to a previous
1055 1054 known good state).'''
1056 1055 if 'dirstate' in self.__dict__:
1057 1056 for k in self.dirstate._filecache:
1058 1057 try:
1059 1058 delattr(self.dirstate, k)
1060 1059 except AttributeError:
1061 1060 pass
1062 1061 delattr(self, 'dirstate')
1063 1062
1064 1063 def invalidate(self):
1065 1064 for k in self._filecache:
1066 1065 # dirstate is invalidated separately in invalidatedirstate()
1067 1066 if k == 'dirstate':
1068 1067 continue
1069 1068
1070 1069 try:
1071 1070 delattr(self, k)
1072 1071 except AttributeError:
1073 1072 pass
1074 1073 self.invalidatecaches()
1075 1074
1076 1075 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1077 1076 try:
1078 1077 l = lock.lock(lockname, 0, releasefn, desc=desc)
1079 1078 except error.LockHeld, inst:
1080 1079 if not wait:
1081 1080 raise
1082 1081 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1083 1082 (desc, inst.locker))
1084 1083 # default to 600 seconds timeout
1085 1084 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1086 1085 releasefn, desc=desc)
1087 1086 if acquirefn:
1088 1087 acquirefn()
1089 1088 return l
1090 1089
1091 1090 def _afterlock(self, callback):
1092 1091 """add a callback to the current repository lock.
1093 1092
1094 1093 The callback will be executed on lock release."""
1095 1094 l = self._lockref and self._lockref()
1096 1095 if l:
1097 1096 l.postrelease.append(callback)
1098 1097 else:
1099 1098 callback()
1100 1099
1101 1100 def lock(self, wait=True):
1102 1101 '''Lock the repository store (.hg/store) and return a weak reference
1103 1102 to the lock. Use this before modifying the store (e.g. committing or
1104 1103 stripping). If you are opening a transaction, get a lock as well.'''
1105 1104 l = self._lockref and self._lockref()
1106 1105 if l is not None and l.held:
1107 1106 l.lock()
1108 1107 return l
1109 1108
1110 1109 def unlock():
1111 1110 self.store.write()
1112 1111 if '_phasecache' in vars(self):
1113 1112 self._phasecache.write()
1114 1113 for k, ce in self._filecache.items():
1115 1114 if k == 'dirstate':
1116 1115 continue
1117 1116 ce.refresh()
1118 1117
1119 1118 l = self._lock(self.sjoin("lock"), wait, unlock,
1120 1119 self.invalidate, _('repository %s') % self.origroot)
1121 1120 self._lockref = weakref.ref(l)
1122 1121 return l
1123 1122
1124 1123 def wlock(self, wait=True):
1125 1124 '''Lock the non-store parts of the repository (everything under
1126 1125 .hg except .hg/store) and return a weak reference to the lock.
1127 1126 Use this before modifying files in .hg.'''
1128 1127 l = self._wlockref and self._wlockref()
1129 1128 if l is not None and l.held:
1130 1129 l.lock()
1131 1130 return l
1132 1131
1133 1132 def unlock():
1134 1133 self.dirstate.write()
1135 1134 ce = self._filecache.get('dirstate')
1136 1135 if ce:
1137 1136 ce.refresh()
1138 1137
1139 1138 l = self._lock(self.join("wlock"), wait, unlock,
1140 1139 self.invalidatedirstate, _('working directory of %s') %
1141 1140 self.origroot)
1142 1141 self._wlockref = weakref.ref(l)
1143 1142 return l
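# Canonical lock ordering, matching amend() earlier in this diff (sketch;
# the helper name is invented):
def _demolocking(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()  # working-directory lock first
        lock = repo.lock()    # then the store lock
        # ... mutate the repository here ...
    finally:
        release(lock, wlock)  # postrelease callbacks run at this point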
1144 1143
1145 1144 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1146 1145 """
1147 1146 commit an individual file as part of a larger transaction
1148 1147 """
1149 1148
1150 1149 fname = fctx.path()
1151 1150 text = fctx.data()
1152 1151 flog = self.file(fname)
1153 1152 fparent1 = manifest1.get(fname, nullid)
1154 1153 fparent2 = fparent2o = manifest2.get(fname, nullid)
1155 1154
1156 1155 meta = {}
1157 1156 copy = fctx.renamed()
1158 1157 if copy and copy[0] != fname:
1159 1158 # Mark the new revision of this file as a copy of another
1160 1159 # file. This copy data will effectively act as a parent
1161 1160 # of this new revision. If this is a merge, the first
1162 1161 # parent will be the nullid (meaning "look up the copy data")
1163 1162 # and the second one will be the other parent. For example:
1164 1163 #
1165 1164 # 0 --- 1 --- 3 rev1 changes file foo
1166 1165 # \ / rev2 renames foo to bar and changes it
1167 1166 # \- 2 -/ rev3 should have bar with all changes and
1168 1167 # should record that bar descends from
1169 1168 # bar in rev2 and foo in rev1
1170 1169 #
1171 1170 # this allows this merge to succeed:
1172 1171 #
1173 1172 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1174 1173 # \ / merging rev3 and rev4 should use bar@rev2
1175 1174 # \- 2 --- 4 as the merge base
1176 1175 #
1177 1176
1178 1177 cfname = copy[0]
1179 1178 crev = manifest1.get(cfname)
1180 1179 newfparent = fparent2
1181 1180
1182 1181 if manifest2: # branch merge
1183 1182 if fparent2 == nullid or crev is None: # copied on remote side
1184 1183 if cfname in manifest2:
1185 1184 crev = manifest2[cfname]
1186 1185 newfparent = fparent1
1187 1186
1188 1187 # find source in nearest ancestor if we've lost track
1189 1188 if not crev:
1190 1189 self.ui.debug(" %s: searching for copy revision for %s\n" %
1191 1190 (fname, cfname))
1192 1191 for ancestor in self[None].ancestors():
1193 1192 if cfname in ancestor:
1194 1193 crev = ancestor[cfname].filenode()
1195 1194 break
1196 1195
1197 1196 if crev:
1198 1197 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1199 1198 meta["copy"] = cfname
1200 1199 meta["copyrev"] = hex(crev)
1201 1200 fparent1, fparent2 = nullid, newfparent
1202 1201 else:
1203 1202 self.ui.warn(_("warning: can't find ancestor for '%s' "
1204 1203 "copied from '%s'!\n") % (fname, cfname))
1205 1204
1206 1205 elif fparent2 != nullid:
1207 1206 # is one parent an ancestor of the other?
1208 1207 fparentancestor = flog.ancestor(fparent1, fparent2)
1209 1208 if fparentancestor == fparent1:
1210 1209 fparent1, fparent2 = fparent2, nullid
1211 1210 elif fparentancestor == fparent2:
1212 1211 fparent2 = nullid
1213 1212
1214 1213 # is the file changed?
1215 1214 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1216 1215 changelist.append(fname)
1217 1216 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1218 1217
1219 1218 # are just the flags changed during merge?
1220 1219 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1221 1220 changelist.append(fname)
1222 1221
1223 1222 return fparent1
1224 1223
1225 1224 def commit(self, text="", user=None, date=None, match=None, force=False,
1226 1225 editor=False, extra={}):
1227 1226 """Add a new revision to current repository.
1228 1227
1229 1228 Revision information is gathered from the working directory,
1230 1229 match can be used to filter the committed files. If editor is
1231 1230 supplied, it is called to get a commit message.
1232 1231 """
1233 1232
1234 1233 def fail(f, msg):
1235 1234 raise util.Abort('%s: %s' % (f, msg))
1236 1235
1237 1236 if not match:
1238 1237 match = matchmod.always(self.root, '')
1239 1238
1240 1239 if not force:
1241 1240 vdirs = []
1242 1241 match.dir = vdirs.append
1243 1242 match.bad = fail
1244 1243
1245 1244 wlock = self.wlock()
1246 1245 try:
1247 1246 wctx = self[None]
1248 1247 merge = len(wctx.parents()) > 1
1249 1248
1250 1249 if (not force and merge and match and
1251 1250 (match.files() or match.anypats())):
1252 1251 raise util.Abort(_('cannot partially commit a merge '
1253 1252 '(do not specify files or patterns)'))
1254 1253
1255 1254 changes = self.status(match=match, clean=force)
1256 1255 if force:
1257 1256 changes[0].extend(changes[6]) # mq may commit unchanged files
1258 1257
1259 1258 # check subrepos
1260 1259 subs = []
1261 1260 commitsubs = set()
1262 1261 newstate = wctx.substate.copy()
1263 1262 # only manage subrepos and .hgsubstate if .hgsub is present
1264 1263 if '.hgsub' in wctx:
1265 1264 # we'll decide whether to track this ourselves, thanks
1266 1265 if '.hgsubstate' in changes[0]:
1267 1266 changes[0].remove('.hgsubstate')
1268 1267 if '.hgsubstate' in changes[2]:
1269 1268 changes[2].remove('.hgsubstate')
1270 1269
1271 1270 # compare current state to last committed state
1272 1271 # build new substate based on last committed state
1273 1272 oldstate = wctx.p1().substate
1274 1273 for s in sorted(newstate.keys()):
1275 1274 if not match(s):
1276 1275 # ignore working copy, use old state if present
1277 1276 if s in oldstate:
1278 1277 newstate[s] = oldstate[s]
1279 1278 continue
1280 1279 if not force:
1281 1280 raise util.Abort(
1282 1281 _("commit with new subrepo %s excluded") % s)
1283 1282 if wctx.sub(s).dirty(True):
1284 1283 if not self.ui.configbool('ui', 'commitsubrepos'):
1285 1284 raise util.Abort(
1286 1285 _("uncommitted changes in subrepo %s") % s,
1287 1286 hint=_("use --subrepos for recursive commit"))
1288 1287 subs.append(s)
1289 1288 commitsubs.add(s)
1290 1289 else:
1291 1290 bs = wctx.sub(s).basestate()
1292 1291 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1293 1292 if oldstate.get(s, (None, None, None))[1] != bs:
1294 1293 subs.append(s)
1295 1294
1296 1295 # check for removed subrepos
1297 1296 for p in wctx.parents():
1298 1297 r = [s for s in p.substate if s not in newstate]
1299 1298 subs += [s for s in r if match(s)]
1300 1299 if subs:
1301 1300 if (not match('.hgsub') and
1302 1301 '.hgsub' in (wctx.modified() + wctx.added())):
1303 1302 raise util.Abort(
1304 1303 _("can't commit subrepos without .hgsub"))
1305 1304 changes[0].insert(0, '.hgsubstate')
1306 1305
1307 1306 elif '.hgsub' in changes[2]:
1308 1307 # clean up .hgsubstate when .hgsub is removed
1309 1308 if ('.hgsubstate' in wctx and
1310 1309 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1311 1310 changes[2].insert(0, '.hgsubstate')
1312 1311
1313 1312 # make sure all explicit patterns are matched
1314 1313 if not force and match.files():
1315 1314 matched = set(changes[0] + changes[1] + changes[2])
1316 1315
1317 1316 for f in match.files():
1318 1317 f = self.dirstate.normalize(f)
1319 1318 if f == '.' or f in matched or f in wctx.substate:
1320 1319 continue
1321 1320 if f in changes[3]: # missing
1322 1321 fail(f, _('file not found!'))
1323 1322 if f in vdirs: # visited directory
1324 1323 d = f + '/'
1325 1324 for mf in matched:
1326 1325 if mf.startswith(d):
1327 1326 break
1328 1327 else:
1329 1328 fail(f, _("no match under directory!"))
1330 1329 elif f not in self.dirstate:
1331 1330 fail(f, _("file not tracked!"))
1332 1331
1333 1332 if (not force and not extra.get("close") and not merge
1334 1333 and not (changes[0] or changes[1] or changes[2])
1335 1334 and wctx.branch() == wctx.p1().branch()):
1336 1335 return None
1337 1336
1338 1337 if merge and changes[3]:
1339 1338 raise util.Abort(_("cannot commit merge with missing files"))
1340 1339
1341 1340 ms = mergemod.mergestate(self)
1342 1341 for f in changes[0]:
1343 1342 if f in ms and ms[f] == 'u':
1344 1343 raise util.Abort(_("unresolved merge conflicts "
1345 1344 "(see hg help resolve)"))
1346 1345
1347 1346 cctx = context.workingctx(self, text, user, date, extra, changes)
1348 1347 if editor:
1349 1348 cctx._text = editor(self, cctx, subs)
1350 1349 edited = (text != cctx._text)
1351 1350
1352 1351 # commit subs and write new state
1353 1352 if subs:
1354 1353 for s in sorted(commitsubs):
1355 1354 sub = wctx.sub(s)
1356 1355 self.ui.status(_('committing subrepository %s\n') %
1357 1356 subrepo.subrelpath(sub))
1358 1357 sr = sub.commit(cctx._text, user, date)
1359 1358 newstate[s] = (newstate[s][0], sr)
1360 1359 subrepo.writestate(self, newstate)
1361 1360
1362 1361 # Save commit message in case this transaction gets rolled back
1363 1362 # (e.g. by a pretxncommit hook). Leave the content alone on
1364 1363 # the assumption that the user will use the same editor again.
1365 1364 msgfn = self.savecommitmessage(cctx._text)
1366 1365
1367 1366 p1, p2 = self.dirstate.parents()
1368 1367 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1369 1368 try:
1370 1369 self.hook("precommit", throw=True, parent1=hookp1,
1371 1370 parent2=hookp2)
1372 1371 ret = self.commitctx(cctx, True)
1373 1372 except: # re-raises
1374 1373 if edited:
1375 1374 self.ui.write(
1376 1375 _('note: commit message saved in %s\n') % msgfn)
1377 1376 raise
1378 1377
1379 1378 # update bookmarks, dirstate and mergestate
1380 1379 bookmarks.update(self, [p1, p2], ret)
1381 1380 for f in changes[0] + changes[1]:
1382 1381 self.dirstate.normal(f)
1383 1382 for f in changes[2]:
1384 1383 self.dirstate.drop(f)
1385 1384 self.dirstate.setparents(ret)
1386 1385 ms.reset()
1387 1386 finally:
1388 1387 wlock.release()
1389 1388
1390 1389 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1391 1390 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1392 1391 self._afterlock(commithook)
1393 1392 return ret
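# A minimal usage sketch for commit() (hypothetical path and pattern,
# assuming a repository object opened via mercurial.hg.repository):
#
#     from mercurial import ui as uimod, hg, match as matchmod
#     repo = hg.repository(uimod.ui(), '/path/to/repo')
#     m = matchmod.match(repo.root, '', ['foo.txt'])  # limit to foo.txt
#     node = repo.commit(text='fix foo', user='alice', match=m)
#     # node is the new changeset id, or None if nothing changed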
1394 1393
1395 1394 def commitctx(self, ctx, error=False):
1396 1395 """Add a new revision to current repository.
1397 1396 Revision information is passed via the context argument.
1398 1397 """
1399 1398
1400 1399 tr = lock = None
1401 1400 removed = list(ctx.removed())
1402 1401 p1, p2 = ctx.p1(), ctx.p2()
1403 1402 user = ctx.user()
1404 1403
1405 1404 lock = self.lock()
1406 1405 try:
1407 1406 tr = self.transaction("commit")
1408 1407 trp = weakref.proxy(tr)
1409 1408
1410 1409 if ctx.files():
1411 1410 m1 = p1.manifest().copy()
1412 1411 m2 = p2.manifest()
1413 1412
1414 1413 # check in files
1415 1414 new = {}
1416 1415 changed = []
1417 1416 linkrev = len(self)
1418 1417 for f in sorted(ctx.modified() + ctx.added()):
1419 1418 self.ui.note(f + "\n")
1420 1419 try:
1421 1420 fctx = ctx[f]
1422 1421 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1423 1422 changed)
1424 1423 m1.set(f, fctx.flags())
1425 1424 except OSError, inst:
1426 1425 self.ui.warn(_("trouble committing %s!\n") % f)
1427 1426 raise
1428 1427 except IOError, inst:
1429 1428 errcode = getattr(inst, 'errno', errno.ENOENT)
1430 1429 if error or errcode and errcode != errno.ENOENT:
1431 1430 self.ui.warn(_("trouble committing %s!\n") % f)
1432 1431 raise
1433 1432 else:
1434 1433 removed.append(f)
1435 1434
1436 1435 # update manifest
1437 1436 m1.update(new)
1438 1437 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1439 1438 drop = [f for f in removed if f in m1]
1440 1439 for f in drop:
1441 1440 del m1[f]
1442 1441 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1443 1442 p2.manifestnode(), (new, drop))
1444 1443 files = changed + removed
1445 1444 else:
1446 1445 mn = p1.manifestnode()
1447 1446 files = []
1448 1447
1449 1448 # update changelog
1450 1449 self.changelog.delayupdate()
1451 1450 n = self.changelog.add(mn, files, ctx.description(),
1452 1451 trp, p1.node(), p2.node(),
1453 1452 user, ctx.date(), ctx.extra().copy())
1454 1453 p = lambda: self.changelog.writepending() and self.root or ""
1455 1454 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1456 1455 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1457 1456 parent2=xp2, pending=p)
1458 1457 self.changelog.finalize(trp)
1459 1458 # set the new commit in its proper phase
1460 1459 targetphase = phases.newcommitphase(self.ui)
1461 1460 if targetphase:
1462 1461 # retracting the boundary does not alter parent changesets.
1463 1462 # if a parent has a higher phase, the resulting phase will
1464 1463 # be compliant anyway
1465 1464 #
1466 1465 # if minimal phase was 0 we don't need to retract anything
1467 1466 phases.retractboundary(self, targetphase, [n])
1468 1467 tr.close()
1469 1468 self.updatebranchcache()
1470 1469 return n
1471 1470 finally:
1472 1471 if tr:
1473 1472 tr.release()
1474 1473 lock.release()
1475 1474
1476 1475 def destroyed(self, newheadnodes=None):
1477 1476 '''Inform the repository that nodes have been destroyed.
1478 1477 Intended for use by strip and rollback, so there's a common
1479 1478 place for anything that has to be done after destroying history.
1480 1479
1481 1480 If you know the branchheads cache was up to date before nodes were removed
1482 1481 and you also know the set of candidate new heads that may have resulted
1483 1482 from the destruction, you can set newheadnodes. This will enable the
1484 1483 code to update the branchheads cache, rather than having future code
1485 1484 decide it's invalid and regenerate it from scratch.
1486 1485 '''
1487 1486 # If we have info, newheadnodes, on how to update the branch cache, do
1488 1487 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1489 1488 # will be caught the next time it is read.
1490 1489 if newheadnodes:
1491 1490 tiprev = len(self) - 1
1492 1491 ctxgen = (self[node] for node in newheadnodes
1493 1492 if self.changelog.hasnode(node))
1494 1493 self._updatebranchcache(self._branchcache, ctxgen)
1495 1494 self._writebranchcache(self._branchcache, self.changelog.tip(),
1496 1495 tiprev)
1497 1496
1498 1497 # Ensure the persistent tag cache is updated. Doing it now
1499 1498 # means that the tag cache only has to worry about destroyed
1500 1499 # heads immediately after a strip/rollback. That in turn
1501 1500 # guarantees that "cachetip == currenttip" (comparing both rev
1502 1501 # and node) always means no nodes have been added or destroyed.
1503 1502
1504 1503 # XXX this is suboptimal when qrefresh'ing: we strip the current
1505 1504 # head, refresh the tag cache, then immediately add a new head.
1506 1505 # But I think doing it this way is necessary for the "instant
1507 1506 # tag cache retrieval" case to work.
1508 1507 self.invalidatecaches()
1509 1508
1510 1509 # Discard all cache entries to force reloading everything.
1511 1510 self._filecache.clear()
1512 1511
1513 1512 def walk(self, match, node=None):
1514 1513 '''
1515 1514 walk recursively through the directory tree or a given
1516 1515 changeset, finding all files matched by the match
1517 1516 function
1518 1517 '''
1519 1518 return self[node].walk(match)
1520 1519
1521 1520 def status(self, node1='.', node2=None, match=None,
1522 1521 ignored=False, clean=False, unknown=False,
1523 1522 listsubrepos=False):
1524 1523 """return status of files between two nodes or node and working
1525 1524 directory.
1526 1525
1527 1526 If node1 is None, use the first dirstate parent instead.
1528 1527 If node2 is None, compare node1 with working directory.
1529 1528 """
1530 1529
1531 1530 def mfmatches(ctx):
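# restrict ctx's manifest to the files selected by the match object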
1532 1531 mf = ctx.manifest().copy()
1533 1532 if match.always():
1534 1533 return mf
1535 1534 for fn in mf.keys():
1536 1535 if not match(fn):
1537 1536 del mf[fn]
1538 1537 return mf
1539 1538
1540 1539 if isinstance(node1, context.changectx):
1541 1540 ctx1 = node1
1542 1541 else:
1543 1542 ctx1 = self[node1]
1544 1543 if isinstance(node2, context.changectx):
1545 1544 ctx2 = node2
1546 1545 else:
1547 1546 ctx2 = self[node2]
1548 1547
1549 1548 working = ctx2.rev() is None
1550 1549 parentworking = working and ctx1 == self['.']
1551 1550 match = match or matchmod.always(self.root, self.getcwd())
1552 1551 listignored, listclean, listunknown = ignored, clean, unknown
1553 1552
1554 1553 # load earliest manifest first for caching reasons
1555 1554 if not working and ctx2.rev() < ctx1.rev():
1556 1555 ctx2.manifest()
1557 1556
1558 1557 if not parentworking:
1559 1558 def bad(f, msg):
1560 1559 # 'f' may be a directory pattern from 'match.files()',
1561 1560 # so 'f not in ctx1' is not enough
1562 1561 if f not in ctx1 and f not in ctx1.dirs():
1563 1562 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1564 1563 match.bad = bad
1565 1564
1566 1565 if working: # we need to scan the working dir
1567 1566 subrepos = []
1568 1567 if '.hgsub' in self.dirstate:
1569 1568 subrepos = ctx2.substate.keys()
1570 1569 s = self.dirstate.status(match, subrepos, listignored,
1571 1570 listclean, listunknown)
1572 1571 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1573 1572
1574 1573 # check for any possibly clean files
1575 1574 if parentworking and cmp:
1576 1575 fixup = []
1577 1576 # do a full compare of any files that might have changed
1578 1577 for f in sorted(cmp):
1579 1578 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1580 1579 or ctx1[f].cmp(ctx2[f])):
1581 1580 modified.append(f)
1582 1581 else:
1583 1582 fixup.append(f)
1584 1583
1585 1584 # update dirstate for files that are actually clean
1586 1585 if fixup:
1587 1586 if listclean:
1588 1587 clean += fixup
1589 1588
1590 1589 try:
1591 1590 # updating the dirstate is optional
1592 1591 # so we don't wait on the lock
1593 1592 wlock = self.wlock(False)
1594 1593 try:
1595 1594 for f in fixup:
1596 1595 self.dirstate.normal(f)
1597 1596 finally:
1598 1597 wlock.release()
1599 1598 except error.LockError:
1600 1599 pass
1601 1600
1602 1601 if not parentworking:
1603 1602 mf1 = mfmatches(ctx1)
1604 1603 if working:
1605 1604 # we are comparing working dir against non-parent
1606 1605 # generate a pseudo-manifest for the working dir
1607 1606 mf2 = mfmatches(self['.'])
1608 1607 for f in cmp + modified + added:
1609 1608 mf2[f] = None
1610 1609 mf2.set(f, ctx2.flags(f))
1611 1610 for f in removed:
1612 1611 if f in mf2:
1613 1612 del mf2[f]
1614 1613 else:
1615 1614 # we are comparing two revisions
1616 1615 deleted, unknown, ignored = [], [], []
1617 1616 mf2 = mfmatches(ctx2)
1618 1617
1619 1618 modified, added, clean = [], [], []
1620 1619 withflags = mf1.withflags() | mf2.withflags()
1621 1620 for fn in mf2:
1622 1621 if fn in mf1:
1623 1622 if (fn not in deleted and
1624 1623 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1625 1624 (mf1[fn] != mf2[fn] and
1626 1625 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1627 1626 modified.append(fn)
1628 1627 elif listclean:
1629 1628 clean.append(fn)
1630 1629 del mf1[fn]
1631 1630 elif fn not in deleted:
1632 1631 added.append(fn)
1633 1632 removed = mf1.keys()
1634 1633
1635 1634 if working and modified and not self.dirstate._checklink:
1636 1635 # Symlink placeholders may get non-symlink-like contents
1637 1636 # via user error or dereferencing by NFS or Samba servers,
1638 1637 # so we filter out any placeholders that don't look like a
1639 1638 # symlink
1640 1639 sane = []
1641 1640 for f in modified:
1642 1641 if ctx2.flags(f) == 'l':
1643 1642 d = ctx2[f].data()
1644 1643 if len(d) >= 1024 or '\n' in d or util.binary(d):
1645 1644 self.ui.debug('ignoring suspect symlink placeholder'
1646 1645 ' "%s"\n' % f)
1647 1646 continue
1648 1647 sane.append(f)
1649 1648 modified = sane
1650 1649
1651 1650 r = modified, added, removed, deleted, unknown, ignored, clean
1652 1651
1653 1652 if listsubrepos:
1654 1653 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1655 1654 if working:
1656 1655 rev2 = None
1657 1656 else:
1658 1657 rev2 = ctx2.substate[subpath][1]
1659 1658 try:
1660 1659 submatch = matchmod.narrowmatcher(subpath, match)
1661 1660 s = sub.status(rev2, match=submatch, ignored=listignored,
1662 1661 clean=listclean, unknown=listunknown,
1663 1662 listsubrepos=True)
1664 1663 for rfiles, sfiles in zip(r, s):
1665 1664 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1666 1665 except error.LookupError:
1667 1666 self.ui.status(_("skipping missing subrepository: %s\n")
1668 1667 % subpath)
1669 1668
1670 1669 for l in r:
1671 1670 l.sort()
1672 1671 return r
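# A typical caller unpacks the seven lists in order, e.g.:
#
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(ignored=True, clean=True, unknown=True)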
1673 1672
1674 1673 def heads(self, start=None):
1675 1674 heads = self.changelog.heads(start)
1676 1675 # sort the output in rev descending order
1677 1676 return sorted(heads, key=self.changelog.rev, reverse=True)
1678 1677
1679 1678 def branchheads(self, branch=None, start=None, closed=False):
1680 1679 '''return a (possibly filtered) list of heads for the given branch
1681 1680
1682 1681 Heads are returned in topological order, from newest to oldest.
1683 1682 If branch is None, use the dirstate branch.
1684 1683 If start is not None, return only heads reachable from start.
1685 1684 If closed is True, return heads that are marked as closed as well.
1686 1685 '''
1687 1686 if branch is None:
1688 1687 branch = self[None].branch()
1689 1688 branches = self.branchmap()
1690 1689 if branch not in branches:
1691 1690 return []
1692 1691 # the cache returns heads ordered lowest to highest
1693 1692 bheads = list(reversed(branches[branch]))
1694 1693 if start is not None:
1695 1694 # filter out the heads that cannot be reached from startrev
1696 1695 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1697 1696 bheads = [h for h in bheads if h in fbheads]
1698 1697 if not closed:
1699 1698 bheads = [h for h in bheads if not self[h].closesbranch()]
1700 1699 return bheads
1701 1700
1702 1701 def branches(self, nodes):
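# for each node, follow first parents until a merge or root is hit and
# return (head, base, p1, p2) tuples describing each linear run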
1703 1702 if not nodes:
1704 1703 nodes = [self.changelog.tip()]
1705 1704 b = []
1706 1705 for n in nodes:
1707 1706 t = n
1708 1707 while True:
1709 1708 p = self.changelog.parents(n)
1710 1709 if p[1] != nullid or p[0] == nullid:
1711 1710 b.append((t, n, p[0], p[1]))
1712 1711 break
1713 1712 n = p[0]
1714 1713 return b
1715 1714
1716 1715 def between(self, pairs):
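# for each (top, bottom) pair, sample the first-parent chain from top
# toward bottom at exponentially growing distances (1, 2, 4, 8, ...);
# used by the legacy wire-protocol discovery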
1717 1716 r = []
1718 1717
1719 1718 for top, bottom in pairs:
1720 1719 n, l, i = top, [], 0
1721 1720 f = 1
1722 1721
1723 1722 while n != bottom and n != nullid:
1724 1723 p = self.changelog.parents(n)[0]
1725 1724 if i == f:
1726 1725 l.append(n)
1727 1726 f = f * 2
1728 1727 n = p
1729 1728 i += 1
1730 1729
1731 1730 r.append(l)
1732 1731
1733 1732 return r
1734 1733
1735 1734 def pull(self, remote, heads=None, force=False):
1736 1735 # don't open a transaction for nothing or you break future useful
1737 1736 # rollback calls
1738 1737 tr = None
1739 1738 trname = 'pull\n' + util.hidepassword(remote.url())
1740 1739 lock = self.lock()
1741 1740 try:
1742 1741 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1743 1742 force=force)
1744 1743 common, fetch, rheads = tmp
1745 1744 if not fetch:
1746 1745 self.ui.status(_("no changes found\n"))
1747 1746 added = []
1748 1747 result = 0
1749 1748 else:
1750 1749 tr = self.transaction(trname)
1751 1750 if heads is None and list(common) == [nullid]:
1752 1751 self.ui.status(_("requesting all changes\n"))
1753 1752 elif heads is None and remote.capable('changegroupsubset'):
1754 1753 # issue1320, avoid a race if remote changed after discovery
1755 1754 heads = rheads
1756 1755
1757 1756 if remote.capable('getbundle'):
1758 1757 cg = remote.getbundle('pull', common=common,
1759 1758 heads=heads or rheads)
1760 1759 elif heads is None:
1761 1760 cg = remote.changegroup(fetch, 'pull')
1762 1761 elif not remote.capable('changegroupsubset'):
1763 1762 raise util.Abort(_("partial pull cannot be done because "
1764 1763 "other repository doesn't support "
1765 1764 "changegroupsubset."))
1766 1765 else:
1767 1766 cg = remote.changegroupsubset(fetch, heads, 'pull')
1768 1767 clstart = len(self.changelog)
1769 1768 result = self.addchangegroup(cg, 'pull', remote.url())
1770 1769 clend = len(self.changelog)
1771 1770 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1772 1771
1773 1772 # compute target subset
1774 1773 if heads is None:
1776 1775 # We pulled everything possible
1776 1775 # sync on everything common
1777 1776 subset = common + added
1778 1777 else:
1779 1778 # We pulled a specific subset
1780 1779 # sync on this subset
1781 1780 subset = heads
1782 1781
1783 1782 # Get remote phases data from remote
1784 1783 remotephases = remote.listkeys('phases')
1785 1784 publishing = bool(remotephases.get('publishing', False))
1786 1785 if remotephases and not publishing:
1787 1786 # remote is new and non-publishing
1788 1787 pheads, _dr = phases.analyzeremotephases(self, subset,
1789 1788 remotephases)
1790 1789 phases.advanceboundary(self, phases.public, pheads)
1791 1790 phases.advanceboundary(self, phases.draft, subset)
1792 1791 else:
1793 1792 # Remote is old or publishing; all common changesets
1794 1793 # should be seen as public
1795 1794 phases.advanceboundary(self, phases.public, subset)
1796 1795
1797 1796 if obsolete._enabled:
1798 1797 self.ui.debug('fetching remote obsolete markers\n')
1799 1798 remoteobs = remote.listkeys('obsolete')
1800 1799 if 'dump0' in remoteobs:
1801 1800 if tr is None:
1802 1801 tr = self.transaction(trname)
1803 1802 for key in sorted(remoteobs, reverse=True):
1804 1803 if key.startswith('dump'):
1805 1804 data = base85.b85decode(remoteobs[key])
1806 1805 self.obsstore.mergemarkers(tr, data)
1807 1806 if tr is not None:
1808 1807 tr.close()
1809 1808 finally:
1810 1809 if tr is not None:
1811 1810 tr.release()
1812 1811 lock.release()
1813 1812
1814 1813 return result
1815 1814
1816 1815 def checkpush(self, force, revs):
1817 1816 """Extensions can override this function if additional checks have
1818 1817 to be performed before pushing, or call it if they override push
1819 1818 command.
1820 1819 """
1821 1820 pass
1822 1821
1823 1822 def push(self, remote, force=False, revs=None, newbranch=False):
1824 1823 '''Push outgoing changesets (limited by revs) from the current
1825 1824 repository to remote. Return an integer:
1826 1825 - None means nothing to push
1827 1826 - 0 means HTTP error
1828 1827 - 1 means we pushed and remote head count is unchanged *or*
1829 1828 we have outgoing changesets but refused to push
1830 1829 - other values as described by addchangegroup()
1831 1830 '''
1832 1831 # there are two ways to push to remote repo:
1833 1832 #
1834 1833 # addchangegroup assumes local user can lock remote
1835 1834 # repo (local filesystem, old ssh servers).
1836 1835 #
1837 1836 # unbundle assumes local user cannot lock remote repo (new ssh
1838 1837 # servers, http servers).
1839 1838
1840 1839 if not remote.canpush():
1841 1840 raise util.Abort(_("destination does not support push"))
1842 1841 # get local lock as we might write phase data
1843 1842 locallock = self.lock()
1844 1843 try:
1845 1844 self.checkpush(force, revs)
1846 1845 lock = None
1847 1846 unbundle = remote.capable('unbundle')
1848 1847 if not unbundle:
1849 1848 lock = remote.lock()
1850 1849 try:
1851 1850 # discovery
1852 1851 fci = discovery.findcommonincoming
1853 1852 commoninc = fci(self, remote, force=force)
1854 1853 common, inc, remoteheads = commoninc
1855 1854 fco = discovery.findcommonoutgoing
1856 1855 outgoing = fco(self, remote, onlyheads=revs,
1857 1856 commoninc=commoninc, force=force)
1858 1857
1859 1858
1860 1859 if not outgoing.missing:
1861 1860 # nothing to push
1862 1861 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1863 1862 ret = None
1864 1863 else:
1865 1864 # something to push
1866 1865 if not force:
1867 1866 # if self.obsstore is empty --> no obsolete markers,
1868 1867 # so we can skip the iteration
1869 1868 if self.obsstore:
1870 1869 # these messages are defined here because of the 80-char limit
1871 1870 mso = _("push includes an obsolete changeset: %s!")
1872 1871 msu = _("push includes an unstable changeset: %s!")
1873 1872 # If there is at least one obsolete or unstable
1874 1873 # changeset in missing, at least one of the
1875 1874 # missing heads will be obsolete or unstable.
1876 1875 # So checking heads only is ok
1877 1876 for node in outgoing.missingheads:
1878 1877 ctx = self[node]
1879 1878 if ctx.obsolete():
1880 1879 raise util.Abort(_(mso) % ctx)
1881 1880 elif ctx.unstable():
1882 1881 raise util.Abort(_(msu) % ctx)
1883 1882 discovery.checkheads(self, remote, outgoing,
1884 1883 remoteheads, newbranch,
1885 1884 bool(inc))
1886 1885
1887 1886 # create a changegroup from local
1888 1887 if revs is None and not outgoing.excluded:
1889 1888 # push everything,
1890 1889 # use the fast path, no race possible on push
1891 1890 cg = self._changegroup(outgoing.missing, 'push')
1892 1891 else:
1893 1892 cg = self.getlocalbundle('push', outgoing)
1894 1893
1895 1894 # apply changegroup to remote
1896 1895 if unbundle:
1897 1896 # local repo finds heads on server, finds out what
1898 1897 # revs it must push. once revs transferred, if server
1899 1898 # finds it has different heads (someone else won
1900 1899 # commit/push race), server aborts.
1901 1900 if force:
1902 1901 remoteheads = ['force']
1903 1902 # ssh: return remote's addchangegroup()
1904 1903 # http: return remote's addchangegroup() or 0 for error
1905 1904 ret = remote.unbundle(cg, remoteheads, 'push')
1906 1905 else:
1907 1906 # we return an integer indicating remote head count
1908 1907 # change
1909 1908 ret = remote.addchangegroup(cg, 'push', self.url())
1910 1909
1911 1910 if ret:
1912 1911 # push succeeded, synchronize target of the push
1913 1912 cheads = outgoing.missingheads
1914 1913 elif revs is None:
1915 1914 # All-out push failed; synchronize all common
1916 1915 cheads = outgoing.commonheads
1917 1916 else:
1918 1917 # I want cheads = heads(::missingheads and ::commonheads)
1919 1918 # (missingheads is revs with secret changesets filtered out)
1920 1919 #
1921 1920 # This can be expressed as:
1922 1921 # cheads = ( (missingheads and ::commonheads)
1923 1922 # + (commonheads and ::missingheads)
1924 1923 # )
1925 1924 #
1926 1925 # while trying to push we already computed the following:
1927 1926 # common = (::commonheads)
1928 1927 # missing = ((commonheads::missingheads) - commonheads)
1929 1928 #
1930 1929 # We can pick:
1931 1930 # * missingheads part of common (::commonheads)
1932 1931 common = set(outgoing.common)
1933 1932 cheads = [node for node in revs if node in common]
1934 1933 # and
1935 1934 # * commonheads that are parents of missing
1936 1935 revset = self.set('%ln and parents(roots(%ln))',
1937 1936 outgoing.commonheads,
1938 1937 outgoing.missing)
1939 1938 cheads.extend(c.node() for c in revset)
1940 1939 # even when we don't push, exchanging phase data is useful
1941 1940 remotephases = remote.listkeys('phases')
1942 1941 if not remotephases: # old server or public only repo
1943 1942 phases.advanceboundary(self, phases.public, cheads)
1944 1943 # don't push any phase data as there is nothing to push
1945 1944 else:
1946 1945 ana = phases.analyzeremotephases(self, cheads, remotephases)
1947 1946 pheads, droots = ana
1948 1947 ### Apply remote phase on local
1949 1948 if remotephases.get('publishing', False):
1950 1949 phases.advanceboundary(self, phases.public, cheads)
1951 1950 else: # publish = False
1952 1951 phases.advanceboundary(self, phases.public, pheads)
1953 1952 phases.advanceboundary(self, phases.draft, cheads)
1954 1953 ### Apply local phase on remote
1955 1954
1956 1955 # Get the list of all revs draft on remote but public here.
1957 1956 # XXX Beware that the revset breaks if droots are not strictly
1958 1957 # XXX roots; we may want to ensure they are, but it is costly
1959 1958 outdated = self.set('heads((%ln::%ln) and public())',
1960 1959 droots, cheads)
1961 1960 for newremotehead in outdated:
1962 1961 r = remote.pushkey('phases',
1963 1962 newremotehead.hex(),
1964 1963 str(phases.draft),
1965 1964 str(phases.public))
1966 1965 if not r:
1967 1966 self.ui.warn(_('updating %s to public failed!\n')
1968 1967 % newremotehead)
1969 1968 self.ui.debug('try to push obsolete markers to remote\n')
1970 1969 if (obsolete._enabled and self.obsstore and
1971 1970 'obsolete' in remote.listkeys('namespaces')):
1972 1971 rslts = []
1973 1972 remotedata = self.listkeys('obsolete')
1974 1973 for key in sorted(remotedata, reverse=True):
1975 1974 # reverse sort to ensure we end with dump0
1976 1975 data = remotedata[key]
1977 1976 rslts.append(remote.pushkey('obsolete', key, '', data))
1978 1977 if [r for r in rslts if not r]:
1979 1978 msg = _('failed to push some obsolete markers!\n')
1980 1979 self.ui.warn(msg)
1981 1980 finally:
1982 1981 if lock is not None:
1983 1982 lock.release()
1984 1983 finally:
1985 1984 locallock.release()
1986 1985
1987 1986 self.ui.debug("checking for updated bookmarks\n")
1988 1987 rb = remote.listkeys('bookmarks')
1989 1988 for k in rb.keys():
1990 1989 if k in self._bookmarks:
1991 1990 nr, nl = rb[k], hex(self._bookmarks[k])
1992 1991 if nr in self:
1993 1992 cr = self[nr]
1994 1993 cl = self[nl]
1995 1994 if bookmarks.validdest(self, cr, cl):
1996 1995 r = remote.pushkey('bookmarks', k, nr, nl)
1997 1996 if r:
1998 1997 self.ui.status(_("updating bookmark %s\n") % k)
1999 1998 else:
2000 1999 self.ui.warn(_('updating bookmark %s'
2001 2000 ' failed!\n') % k)
2002 2001
2003 2002 return ret
2004 2003
2005 2004 def changegroupinfo(self, nodes, source):
2006 2005 if self.ui.verbose or source == 'bundle':
2007 2006 self.ui.status(_("%d changesets found\n") % len(nodes))
2008 2007 if self.ui.debugflag:
2009 2008 self.ui.debug("list of changesets:\n")
2010 2009 for node in nodes:
2011 2010 self.ui.debug("%s\n" % hex(node))
2012 2011
2013 2012 def changegroupsubset(self, bases, heads, source):
2014 2013 """Compute a changegroup consisting of all the nodes that are
2015 2014 descendants of any of the bases and ancestors of any of the heads.
2016 2015 Return a chunkbuffer object whose read() method will return
2017 2016 successive changegroup chunks.
2018 2017
2019 2018 It is fairly complex as determining which filenodes and which
2020 2019 manifest nodes need to be included for the changeset to be complete
2021 2020 is non-trivial.
2022 2021
2023 2022 Another wrinkle is doing the reverse, figuring out which changeset in
2024 2023 the changegroup a particular filenode or manifestnode belongs to.
2025 2024 """
2026 2025 cl = self.changelog
2027 2026 if not bases:
2028 2027 bases = [nullid]
2029 2028 csets, bases, heads = cl.nodesbetween(bases, heads)
2030 2029 # We assume that all ancestors of bases are known
2031 2030 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2032 2031 return self._changegroupsubset(common, csets, heads, source)
2033 2032
2034 2033 def getlocalbundle(self, source, outgoing):
2035 2034 """Like getbundle, but taking a discovery.outgoing as an argument.
2036 2035
2037 2036 This is only implemented for local repos and reuses potentially
2038 2037 precomputed sets in outgoing."""
2039 2038 if not outgoing.missing:
2040 2039 return None
2041 2040 return self._changegroupsubset(outgoing.common,
2042 2041 outgoing.missing,
2043 2042 outgoing.missingheads,
2044 2043 source)
2045 2044
2046 2045 def getbundle(self, source, heads=None, common=None):
2047 2046 """Like changegroupsubset, but returns the set difference between the
2048 2047 ancestors of heads and the ancestors common.
2049 2048
2050 2049 If heads is None, use the local heads. If common is None, use [nullid].
2051 2050
2052 2051 The nodes in common might not all be known locally due to the way the
2053 2052 current discovery protocol works.
2054 2053 """
2055 2054 cl = self.changelog
2056 2055 if common:
2057 2056 nm = cl.nodemap
2058 2057 common = [n for n in common if n in nm]
2059 2058 else:
2060 2059 common = [nullid]
2061 2060 if not heads:
2062 2061 heads = cl.heads()
2063 2062 return self.getlocalbundle(source,
2064 2063 discovery.outgoing(cl, common, heads))
2065 2064
2066 2065 def _changegroupsubset(self, commonrevs, csets, heads, source):
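# slow-path bundle generation: stream the changelog, manifest and file
# revlogs for csets, using the lookup callback below to map each
# revision back to the changeset that introduced it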
2067 2066
2068 2067 cl = self.changelog
2069 2068 mf = self.manifest
2070 2069 mfs = {} # needed manifests
2071 2070 fnodes = {} # needed file nodes
2072 2071 changedfiles = set()
2073 2072 fstate = ['', {}]
2074 2073 count = [0, 0]
2075 2074
2076 2075 # can we go through the fast path?
2077 2076 heads.sort()
2078 2077 if heads == sorted(self.heads()):
2079 2078 return self._changegroup(csets, source)
2080 2079
2081 2080 # slow path
2082 2081 self.hook('preoutgoing', throw=True, source=source)
2083 2082 self.changegroupinfo(csets, source)
2084 2083
2085 2084 # filter any nodes that claim to be part of the known set
2086 2085 def prune(revlog, missing):
2087 2086 rr, rl = revlog.rev, revlog.linkrev
2088 2087 return [n for n in missing
2089 2088 if rl(rr(n)) not in commonrevs]
2090 2089
2091 2090 progress = self.ui.progress
2092 2091 _bundling = _('bundling')
2093 2092 _changesets = _('changesets')
2094 2093 _manifests = _('manifests')
2095 2094 _files = _('files')
2096 2095
2097 2096 def lookup(revlog, x):
2098 2097 if revlog == cl:
2099 2098 c = cl.read(x)
2100 2099 changedfiles.update(c[3])
2101 2100 mfs.setdefault(c[0], x)
2102 2101 count[0] += 1
2103 2102 progress(_bundling, count[0],
2104 2103 unit=_changesets, total=count[1])
2105 2104 return x
2106 2105 elif revlog == mf:
2107 2106 clnode = mfs[x]
2108 2107 mdata = mf.readfast(x)
2109 2108 for f, n in mdata.iteritems():
2110 2109 if f in changedfiles:
2111 2110 fnodes[f].setdefault(n, clnode)
2112 2111 count[0] += 1
2113 2112 progress(_bundling, count[0],
2114 2113 unit=_manifests, total=count[1])
2115 2114 return clnode
2116 2115 else:
2117 2116 progress(_bundling, count[0], item=fstate[0],
2118 2117 unit=_files, total=count[1])
2119 2118 return fstate[1][x]
2120 2119
2121 2120 bundler = changegroup.bundle10(lookup)
2122 2121 reorder = self.ui.config('bundle', 'reorder', 'auto')
2123 2122 if reorder == 'auto':
2124 2123 reorder = None
2125 2124 else:
2126 2125 reorder = util.parsebool(reorder)
2127 2126
2128 2127 def gengroup():
2129 2128 # Create a changenode group generator that will call our functions
2130 2129 # back to look up the owning changenode and collect information.
2131 2130 count[:] = [0, len(csets)]
2132 2131 for chunk in cl.group(csets, bundler, reorder=reorder):
2133 2132 yield chunk
2134 2133 progress(_bundling, None)
2135 2134
2136 2135 # Create a generator for the manifestnodes that calls our lookup
2137 2136 # and data collection functions back.
2138 2137 for f in changedfiles:
2139 2138 fnodes[f] = {}
2140 2139 count[:] = [0, len(mfs)]
2141 2140 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2142 2141 yield chunk
2143 2142 progress(_bundling, None)
2144 2143
2145 2144 mfs.clear()
2146 2145
2147 2146 # Go through all our files in order sorted by name.
2148 2147 count[:] = [0, len(changedfiles)]
2149 2148 for fname in sorted(changedfiles):
2150 2149 filerevlog = self.file(fname)
2151 2150 if not len(filerevlog):
2152 2151 raise util.Abort(_("empty or missing revlog for %s")
2153 2152 % fname)
2154 2153 fstate[0] = fname
2155 2154 fstate[1] = fnodes.pop(fname, {})
2156 2155
2157 2156 nodelist = prune(filerevlog, fstate[1])
2158 2157 if nodelist:
2159 2158 count[0] += 1
2160 2159 yield bundler.fileheader(fname)
2161 2160 for chunk in filerevlog.group(nodelist, bundler, reorder):
2162 2161 yield chunk
2163 2162
2164 2163 # Signal that no more groups are left.
2165 2164 yield bundler.close()
2166 2165 progress(_bundling, None)
2167 2166
2168 2167 if csets:
2169 2168 self.hook('outgoing', node=hex(csets[0]), source=source)
2170 2169
2171 2170 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2172 2171
2173 2172 def changegroup(self, basenodes, source):
2174 2173 # to avoid a race we use changegroupsubset() (issue1320)
2175 2174 return self.changegroupsubset(basenodes, self.heads(), source)
2176 2175
2177 2176 def _changegroup(self, nodes, source):
2178 2177 """Compute the changegroup of all nodes that we have that a recipient
2179 2178 doesn't. Return a chunkbuffer object whose read() method will return
2180 2179 successive changegroup chunks.
2181 2180
2182 2181 This is much easier than the previous function as we can assume that
2183 2182 the recipient has any changenode we aren't sending them.
2184 2183
2185 2184 nodes is the set of nodes to send"""
2186 2185
2187 2186 cl = self.changelog
2188 2187 mf = self.manifest
2189 2188 mfs = {}
2190 2189 changedfiles = set()
2191 2190 fstate = ['']
2192 2191 count = [0, 0]
2193 2192
2194 2193 self.hook('preoutgoing', throw=True, source=source)
2195 2194 self.changegroupinfo(nodes, source)
2196 2195
2197 2196 revset = set([cl.rev(n) for n in nodes])
2198 2197
2199 2198 def gennodelst(log):
2200 2199 ln, llr = log.node, log.linkrev
2201 2200 return [ln(r) for r in log if llr(r) in revset]
2202 2201
2203 2202 progress = self.ui.progress
2204 2203 _bundling = _('bundling')
2205 2204 _changesets = _('changesets')
2206 2205 _manifests = _('manifests')
2207 2206 _files = _('files')
2208 2207
2209 2208 def lookup(revlog, x):
2210 2209 if revlog == cl:
2211 2210 c = cl.read(x)
2212 2211 changedfiles.update(c[3])
2213 2212 mfs.setdefault(c[0], x)
2214 2213 count[0] += 1
2215 2214 progress(_bundling, count[0],
2216 2215 unit=_changesets, total=count[1])
2217 2216 return x
2218 2217 elif revlog == mf:
2219 2218 count[0] += 1
2220 2219 progress(_bundling, count[0],
2221 2220 unit=_manifests, total=count[1])
2222 2221 return cl.node(revlog.linkrev(revlog.rev(x)))
2223 2222 else:
2224 2223 progress(_bundling, count[0], item=fstate[0],
2225 2224 total=count[1], unit=_files)
2226 2225 return cl.node(revlog.linkrev(revlog.rev(x)))
2227 2226
2228 2227 bundler = changegroup.bundle10(lookup)
2229 2228 reorder = self.ui.config('bundle', 'reorder', 'auto')
2230 2229 if reorder == 'auto':
2231 2230 reorder = None
2232 2231 else:
2233 2232 reorder = util.parsebool(reorder)
2234 2233
2235 2234 def gengroup():
2236 2235 '''yield a sequence of changegroup chunks (strings)'''
2237 2236 # construct a list of all changed files
2238 2237
2239 2238 count[:] = [0, len(nodes)]
2240 2239 for chunk in cl.group(nodes, bundler, reorder=reorder):
2241 2240 yield chunk
2242 2241 progress(_bundling, None)
2243 2242
2244 2243 count[:] = [0, len(mfs)]
2245 2244 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2246 2245 yield chunk
2247 2246 progress(_bundling, None)
2248 2247
2249 2248 count[:] = [0, len(changedfiles)]
2250 2249 for fname in sorted(changedfiles):
2251 2250 filerevlog = self.file(fname)
2252 2251 if not len(filerevlog):
2253 2252 raise util.Abort(_("empty or missing revlog for %s")
2254 2253 % fname)
2255 2254 fstate[0] = fname
2256 2255 nodelist = gennodelst(filerevlog)
2257 2256 if nodelist:
2258 2257 count[0] += 1
2259 2258 yield bundler.fileheader(fname)
2260 2259 for chunk in filerevlog.group(nodelist, bundler, reorder):
2261 2260 yield chunk
2262 2261 yield bundler.close()
2263 2262 progress(_bundling, None)
2264 2263
2265 2264 if nodes:
2266 2265 self.hook('outgoing', node=hex(nodes[0]), source=source)
2267 2266
2268 2267 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2269 2268
2270 2269 def addchangegroup(self, source, srctype, url, emptyok=False):
2271 2270 """Add the changegroup returned by source.read() to this repo.
2272 2271 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2273 2272 the URL of the repo where this changegroup is coming from.
2274 2273
2275 2274 Return an integer summarizing the change to this repo:
2276 2275 - nothing changed or no source: 0
2277 2276 - more heads than before: 1+added heads (2..n)
2278 2277 - fewer heads than before: -1-removed heads (-2..-n)
2279 2278 - number of heads stays the same: 1
2280 2279 """
2281 2280 def csmap(x):
2282 2281 self.ui.debug("add changeset %s\n" % short(x))
2283 2282 return len(cl)
2284 2283
2285 2284 def revmap(x):
2286 2285 return cl.rev(x)
2287 2286
2288 2287 if not source:
2289 2288 return 0
2290 2289
2291 2290 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2292 2291
2293 2292 changesets = files = revisions = 0
2294 2293 efiles = set()
2295 2294
2296 2295 # write changelog data to temp files so concurrent readers will not see
2297 2296 # an inconsistent view
2298 2297 cl = self.changelog
2299 2298 cl.delayupdate()
2300 2299 oldheads = cl.heads()
2301 2300
2302 2301 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2303 2302 try:
2304 2303 trp = weakref.proxy(tr)
2305 2304 # pull off the changeset group
2306 2305 self.ui.status(_("adding changesets\n"))
2307 2306 clstart = len(cl)
2308 2307 class prog(object):
2309 2308 step = _('changesets')
2310 2309 count = 1
2311 2310 ui = self.ui
2312 2311 total = None
2313 2312 def __call__(self):
2314 2313 self.ui.progress(self.step, self.count, unit=_('chunks'),
2315 2314 total=self.total)
2316 2315 self.count += 1
2317 2316 pr = prog()
2318 2317 source.callback = pr
2319 2318
2320 2319 source.changelogheader()
2321 2320 srccontent = cl.addgroup(source, csmap, trp)
2322 2321 if not (srccontent or emptyok):
2323 2322 raise util.Abort(_("received changelog group is empty"))
2324 2323 clend = len(cl)
2325 2324 changesets = clend - clstart
2326 2325 for c in xrange(clstart, clend):
2327 2326 efiles.update(self[c].files())
2328 2327 efiles = len(efiles)
2329 2328 self.ui.progress(_('changesets'), None)
2330 2329
2331 2330 # pull off the manifest group
2332 2331 self.ui.status(_("adding manifests\n"))
2333 2332 pr.step = _('manifests')
2334 2333 pr.count = 1
2335 2334 pr.total = changesets # manifests <= changesets
2336 2335 # no need to check for empty manifest group here:
2337 2336 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2338 2337 # no new manifest will be created and the manifest group will
2339 2338 # be empty during the pull
2340 2339 source.manifestheader()
2341 2340 self.manifest.addgroup(source, revmap, trp)
2342 2341 self.ui.progress(_('manifests'), None)
2343 2342
2344 2343 needfiles = {}
2345 2344 if self.ui.configbool('server', 'validate', default=False):
2346 2345 # validate incoming csets have their manifests
2347 2346 for cset in xrange(clstart, clend):
2348 2347 mfest = self.changelog.read(self.changelog.node(cset))[0]
2349 2348 mfest = self.manifest.readdelta(mfest)
2350 2349 # store file nodes we must see
2351 2350 for f, n in mfest.iteritems():
2352 2351 needfiles.setdefault(f, set()).add(n)
2353 2352
2354 2353 # process the files
2355 2354 self.ui.status(_("adding file changes\n"))
2356 2355 pr.step = _('files')
2357 2356 pr.count = 1
2358 2357 pr.total = efiles
2359 2358 source.callback = None
2360 2359
2361 2360 while True:
2362 2361 chunkdata = source.filelogheader()
2363 2362 if not chunkdata:
2364 2363 break
2365 2364 f = chunkdata["filename"]
2366 2365 self.ui.debug("adding %s revisions\n" % f)
2367 2366 pr()
2368 2367 fl = self.file(f)
2369 2368 o = len(fl)
2370 2369 if not fl.addgroup(source, revmap, trp):
2371 2370 raise util.Abort(_("received file revlog group is empty"))
2372 2371 revisions += len(fl) - o
2373 2372 files += 1
2374 2373 if f in needfiles:
2375 2374 needs = needfiles[f]
2376 2375 for new in xrange(o, len(fl)):
2377 2376 n = fl.node(new)
2378 2377 if n in needs:
2379 2378 needs.remove(n)
2380 2379 if not needs:
2381 2380 del needfiles[f]
2382 2381 self.ui.progress(_('files'), None)
2383 2382
2384 2383 for f, needs in needfiles.iteritems():
2385 2384 fl = self.file(f)
2386 2385 for n in needs:
2387 2386 try:
2388 2387 fl.rev(n)
2389 2388 except error.LookupError:
2390 2389 raise util.Abort(
2391 2390 _('missing file data for %s:%s - run hg verify') %
2392 2391 (f, hex(n)))
2393 2392
2394 2393 dh = 0
2395 2394 if oldheads:
2396 2395 heads = cl.heads()
2397 2396 dh = len(heads) - len(oldheads)
2398 2397 for h in heads:
2399 2398 if h not in oldheads and self[h].closesbranch():
2400 2399 dh -= 1
2401 2400 htext = ""
2402 2401 if dh:
2403 2402 htext = _(" (%+d heads)") % dh
2404 2403
2405 2404 self.ui.status(_("added %d changesets"
2406 2405 " with %d changes to %d files%s\n")
2407 2406 % (changesets, revisions, files, htext))
2408 2407 obsolete.clearobscaches(self)
2409 2408
2410 2409 if changesets > 0:
2411 2410 p = lambda: cl.writepending() and self.root or ""
2412 2411 self.hook('pretxnchangegroup', throw=True,
2413 2412 node=hex(cl.node(clstart)), source=srctype,
2414 2413 url=url, pending=p)
2415 2414
2416 2415 added = [cl.node(r) for r in xrange(clstart, clend)]
2417 2416 publishing = self.ui.configbool('phases', 'publish', True)
2418 2417 if srctype == 'push':
2419 2418 # Old servers can not push the boundary themselves.
2420 2419 # New servers won't push the boundary if the changeset already
2421 2420 # existed locally as secret
2422 2421 #
2423 2422 # We should not use added here but the list of all changes in
2424 2423 # the bundle
2425 2424 if publishing:
2426 2425 phases.advanceboundary(self, phases.public, srccontent)
2427 2426 else:
2428 2427 phases.advanceboundary(self, phases.draft, srccontent)
2429 2428 phases.retractboundary(self, phases.draft, added)
2430 2429 elif srctype != 'strip':
2431 2430 # publishing only alters behavior during push
2432 2431 #
2433 2432 # strip should not touch boundary at all
2434 2433 phases.retractboundary(self, phases.draft, added)
2435 2434
2436 2435 # make changelog see real files again
2437 2436 cl.finalize(trp)
2438 2437
2439 2438 tr.close()
2440 2439
2441 2440 if changesets > 0:
2442 2441 self.updatebranchcache()
2443 2442 def runhooks():
2444 2443 # forcefully update the on-disk branch cache
2445 2444 self.ui.debug("updating the branch cache\n")
2446 2445 self.hook("changegroup", node=hex(cl.node(clstart)),
2447 2446 source=srctype, url=url)
2448 2447
2449 2448 for n in added:
2450 2449 self.hook("incoming", node=hex(n), source=srctype,
2451 2450 url=url)
2452 2451 self._afterlock(runhooks)
2453 2452
2454 2453 finally:
2455 2454 tr.release()
2456 2455 # never return 0 here:
2457 2456 if dh < 0:
2458 2457 return dh - 1
2459 2458 else:
2460 2459 return dh + 1
2461 2460
2462 2461 def stream_in(self, remote, requirements):
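# consume a streaming clone: a status line, then "filecount bytecount",
# then for each file a "name\0size" header followed by raw store data
# that is copied verbatim into our store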
2463 2462 lock = self.lock()
2464 2463 try:
2465 2464 fp = remote.stream_out()
2466 2465 l = fp.readline()
2467 2466 try:
2468 2467 resp = int(l)
2469 2468 except ValueError:
2470 2469 raise error.ResponseError(
2471 2470 _('unexpected response from remote server:'), l)
2472 2471 if resp == 1:
2473 2472 raise util.Abort(_('operation forbidden by server'))
2474 2473 elif resp == 2:
2475 2474 raise util.Abort(_('locking the remote repository failed'))
2476 2475 elif resp != 0:
2477 2476 raise util.Abort(_('the server sent an unknown error code'))
2478 2477 self.ui.status(_('streaming all changes\n'))
2479 2478 l = fp.readline()
2480 2479 try:
2481 2480 total_files, total_bytes = map(int, l.split(' ', 1))
2482 2481 except (ValueError, TypeError):
2483 2482 raise error.ResponseError(
2484 2483 _('unexpected response from remote server:'), l)
2485 2484 self.ui.status(_('%d files to transfer, %s of data\n') %
2486 2485 (total_files, util.bytecount(total_bytes)))
2487 2486 handled_bytes = 0
2488 2487 self.ui.progress(_('clone'), 0, total=total_bytes)
2489 2488 start = time.time()
2490 2489 for i in xrange(total_files):
2491 2490 # XXX doesn't support '\n' or '\r' in filenames
2492 2491 l = fp.readline()
2493 2492 try:
2494 2493 name, size = l.split('\0', 1)
2495 2494 size = int(size)
2496 2495 except (ValueError, TypeError):
2497 2496 raise error.ResponseError(
2498 2497 _('unexpected response from remote server:'), l)
2499 2498 if self.ui.debugflag:
2500 2499 self.ui.debug('adding %s (%s)\n' %
2501 2500 (name, util.bytecount(size)))
2502 2501 # for backwards compat, name was partially encoded
2503 2502 ofp = self.sopener(store.decodedir(name), 'w')
2504 2503 for chunk in util.filechunkiter(fp, limit=size):
2505 2504 handled_bytes += len(chunk)
2506 2505 self.ui.progress(_('clone'), handled_bytes,
2507 2506 total=total_bytes)
2508 2507 ofp.write(chunk)
2509 2508 ofp.close()
2510 2509 elapsed = time.time() - start
2511 2510 if elapsed <= 0:
2512 2511 elapsed = 0.001
2513 2512 self.ui.progress(_('clone'), None)
2514 2513 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2515 2514 (util.bytecount(total_bytes), elapsed,
2516 2515 util.bytecount(total_bytes / elapsed)))
2517 2516
2518 2517 # new requirements = old non-format requirements +
2519 2518 #                    new format-related requirements
2520 2519 #                    from the streamed-in repository
2521 2520 requirements.update(set(self.requirements) - self.supportedformats)
2522 2521 self._applyrequirements(requirements)
2523 2522 self._writerequirements()
2524 2523
2525 2524 self.invalidate()
2526 2525 return len(self.heads()) + 1
2527 2526 finally:
2528 2527 lock.release()
2529 2528
2530 2529 def clone(self, remote, heads=[], stream=False):
2531 2530 '''clone remote repository.
2532 2531
2533 2532 keyword arguments:
2534 2533 heads: list of revs to clone (forces use of pull)
2535 2534 stream: use streaming clone if possible'''
2536 2535
2537 2536 # now, all clients that can request uncompressed clones can
2538 2537 # read repo formats supported by all servers that can serve
2539 2538 # them.
2540 2539
2541 2540 # if revlog format changes, client will have to check version
2542 2541 # and format flags on "stream" capability, and use
2543 2542 # uncompressed only if compatible.
2544 2543
2545 2544 if not stream:
2546 2545 # if the server explicitly prefers to stream (for fast LANs)
2547 2546 stream = remote.capable('stream-preferred')
2548 2547
2549 2548 if stream and not heads:
2550 2549 # 'stream' means remote revlog format is revlogv1 only
2551 2550 if remote.capable('stream'):
2552 2551 return self.stream_in(remote, set(('revlogv1',)))
2553 2552 # otherwise, 'streamreqs' contains the remote revlog format
2554 2553 streamreqs = remote.capable('streamreqs')
2555 2554 if streamreqs:
2556 2555 streamreqs = set(streamreqs.split(','))
2557 2556 # if we support it, stream in and adjust our requirements
2558 2557 if not streamreqs - self.supportedformats:
2559 2558 return self.stream_in(remote, streamreqs)
2560 2559 return self.pull(remote, heads)
2561 2560
2562 2561 def pushkey(self, namespace, key, old, new):
2563 2562 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2564 2563 old=old, new=new)
2565 2564 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2566 2565 ret = pushkey.push(self, namespace, key, old, new)
2567 2566 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2568 2567 ret=ret)
2569 2568 return ret
2570 2569
2571 2570 def listkeys(self, namespace):
2572 2571 self.hook('prelistkeys', throw=True, namespace=namespace)
2573 2572 self.ui.debug('listing keys for "%s"\n' % namespace)
2574 2573 values = pushkey.list(self, namespace)
2575 2574 self.hook('listkeys', namespace=namespace, values=values)
2576 2575 return values
2577 2576
2578 2577 def debugwireargs(self, one, two, three=None, four=None, five=None):
2579 2578 '''used to test argument passing over the wire'''
2580 2579 return "%s %s %s %s %s" % (one, two, three, four, five)
2581 2580
2582 2581 def savecommitmessage(self, text):
2583 2582 fp = self.opener('last-message.txt', 'wb')
2584 2583 try:
2585 2584 fp.write(text)
2586 2585 finally:
2587 2586 fp.close()
2588 2587 return self.pathto(fp.name[len(self.root)+1:])
2589 2588
2590 2589 # used to avoid circular references so destructors work
2591 2590 def aftertrans(files):
2592 2591 renamefiles = [tuple(t) for t in files]
2593 2592 def a():
2594 2593 for src, dest in renamefiles:
2595 2594 try:
2596 2595 util.rename(src, dest)
2597 2596 except OSError: # journal file does not yet exist
2598 2597 pass
2599 2598 return a
2600 2599
2601 2600 def undoname(fn):
2602 2601 base, name = os.path.split(fn)
2603 2602 assert name.startswith('journal')
2604 2603 return os.path.join(base, name.replace('journal', 'undo', 1))
2605 2604
2606 2605 def instance(ui, path, create):
2607 2606 return localrepository(ui, util.urllocalpath(path), create)
2608 2607
2609 2608 def islocal(path):
2610 2609 return True
@@ -1,1867 +1,1867
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import bookmarks as bookmarksmod
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16
17 17 def _revancestors(repo, revs, followfirst):
18 18 """Like revlog.ancestors(), but supports followfirst."""
19 19 cut = followfirst and 1 or None
20 20 cl = repo.changelog
21 21 visit = util.deque(revs)
22 22 seen = set([node.nullrev])
23 23 while visit:
24 24 for parent in cl.parentrevs(visit.popleft())[:cut]:
25 25 if parent not in seen:
26 26 visit.append(parent)
27 27 seen.add(parent)
28 28 yield parent
29 29
30 30 def _revdescendants(repo, revs, followfirst):
31 31 """Like revlog.descendants() but supports followfirst."""
32 32 cut = followfirst and 1 or None
33 33 cl = repo.changelog
34 34 first = min(revs)
35 35 nullrev = node.nullrev
36 36 if first == nullrev:
37 37 # Are there nodes with a null first parent and a non-null
38 38 # second one? Maybe. Do we care? Probably not.
39 39 for i in cl:
40 40 yield i
41 41 return
42 42
43 43 seen = set(revs)
44 44 for i in xrange(first + 1, len(cl)):
45 45 for x in cl.parentrevs(i)[:cut]:
46 46 if x != nullrev and x in seen:
47 47 seen.add(i)
48 48 yield i
49 49 break
50 50
51 51 def _revsbetween(repo, roots, heads):
52 52 """Return all paths between roots and heads, inclusive of both endpoint
53 53 sets."""
54 54 if not roots:
55 55 return []
56 56 parentrevs = repo.changelog.parentrevs
57 57 visit = heads[:]
58 58 reachable = set()
59 59 seen = {}
60 60 minroot = min(roots)
61 61 roots = set(roots)
62 62 # open-code the post-order traversal due to the tiny size of
63 63 # sys.getrecursionlimit()
64 64 while visit:
65 65 rev = visit.pop()
66 66 if rev in roots:
67 67 reachable.add(rev)
68 68 parents = parentrevs(rev)
69 69 seen[rev] = parents
70 70 for parent in parents:
71 71 if parent >= minroot and parent not in seen:
72 72 visit.append(parent)
73 73 if not reachable:
74 74 return []
75 75 for rev in sorted(seen):
76 76 for parent in seen[rev]:
77 77 if parent in reachable:
78 78 reachable.add(rev)
79 79 return sorted(reachable)
80 80
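# parser table below: token -> (binding strength, prefix action, infix
# action[, suffix action]); None means the token cannot be used in
# that position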
81 81 elements = {
82 82 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
83 83 "~": (18, None, ("ancestor", 18)),
84 84 "^": (18, None, ("parent", 18), ("parentpost", 18)),
85 85 "-": (5, ("negate", 19), ("minus", 5)),
86 86 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
87 87 ("dagrangepost", 17)),
88 88 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
89 89 ("dagrangepost", 17)),
90 90 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
91 91 "not": (10, ("not", 10)),
92 92 "!": (10, ("not", 10)),
93 93 "and": (5, None, ("and", 5)),
94 94 "&": (5, None, ("and", 5)),
95 95 "or": (4, None, ("or", 4)),
96 96 "|": (4, None, ("or", 4)),
97 97 "+": (4, None, ("or", 4)),
98 98 ",": (2, None, ("list", 2)),
99 99 ")": (0, None, None),
100 100 "symbol": (0, ("symbol",), None),
101 101 "string": (0, ("string",), None),
102 102 "end": (0, None, None),
103 103 }
104 104
105 105 keywords = set(['and', 'or', 'not'])
106 106
107 107 def tokenize(program):
108 108 pos, l = 0, len(program)
109 109 while pos < l:
110 110 c = program[pos]
111 111 if c.isspace(): # skip inter-token whitespace
112 112 pass
113 113 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
114 114 yield ('::', None, pos)
115 115 pos += 1 # skip ahead
116 116 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
117 117 yield ('..', None, pos)
118 118 pos += 1 # skip ahead
119 119 elif c in "():,-|&+!~^": # handle simple operators
120 120 yield (c, None, pos)
121 121 elif (c in '"\'' or c == 'r' and
122 122 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
123 123 if c == 'r':
124 124 pos += 1
125 125 c = program[pos]
126 126 decode = lambda x: x
127 127 else:
128 128 decode = lambda x: x.decode('string-escape')
129 129 pos += 1
130 130 s = pos
131 131 while pos < l: # find closing quote
132 132 d = program[pos]
133 133 if d == '\\': # skip over escaped characters
134 134 pos += 2
135 135 continue
136 136 if d == c:
137 137 yield ('string', decode(program[s:pos]), s)
138 138 break
139 139 pos += 1
140 140 else:
141 141 raise error.ParseError(_("unterminated string"), s)
142 142 # gather up a symbol/keyword
143 143 elif c.isalnum() or c in '._' or ord(c) > 127:
144 144 s = pos
145 145 pos += 1
146 146 while pos < l: # find end of symbol
147 147 d = program[pos]
148 148 if not (d.isalnum() or d in "._/" or ord(d) > 127):
149 149 break
150 150 if d == '.' and program[pos - 1] == '.': # special case for ..
151 151 pos -= 1
152 152 break
153 153 pos += 1
154 154 sym = program[s:pos]
155 155 if sym in keywords: # operator keywords
156 156 yield (sym, None, s)
157 157 else:
158 158 yield ('symbol', sym, s)
159 159 pos -= 1
160 160 else:
161 161 raise error.ParseError(_("syntax error"), pos)
162 162 pos += 1
163 163 yield ('end', None, pos)
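
# Hand-traced example of the generator above (illustrative only):
#   list(tokenize("1:2 and tip")) ==
#       [('symbol', '1', 0), (':', None, 1), ('symbol', '2', 2),
#        ('and', None, 4), ('symbol', 'tip', 8), ('end', None, 11)]
# Each token is a (type, value, position) triple; operators and keywords
# carry no value.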
164 164
165 165 # helpers
166 166
167 167 def getstring(x, err):
168 168 if x and (x[0] == 'string' or x[0] == 'symbol'):
169 169 return x[1]
170 170 raise error.ParseError(err)
171 171
172 172 def getlist(x):
173 173 if not x:
174 174 return []
175 175 if x[0] == 'list':
176 176 return getlist(x[1]) + [x[2]]
177 177 return [x]
178 178
179 179 def getargs(x, min, max, err):
180 180 l = getlist(x)
181 181 if len(l) < min or (max >= 0 and len(l) > max):
182 182 raise error.ParseError(err)
183 183 return l
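
# The parse trees consumed below are nested tuples headed by a node type;
# e.g. (a hand-derived illustration) "p1(.)" parses to
#     ('func', ('symbol', 'p1'), ('symbol', '.'))
# and getset() dispatches on that head via the methods table defined near
# the end of this file.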
184 184
185 185 def getset(repo, subset, x):
186 186 if not x:
187 187 raise error.ParseError(_("missing argument"))
188 188 return methods[x[0]](repo, subset, *x[1:])
189 189
190 190 def _getrevsource(repo, r):
191 191 extra = repo[r].extra()
192 192 for label in ('source', 'transplant_source', 'rebase_source'):
193 193 if label in extra:
194 194 try:
195 195 return repo[extra[label]].rev()
196 196 except error.RepoLookupError:
197 197 pass
198 198 return None
199 199
200 200 # operator methods
201 201
202 202 def stringset(repo, subset, x):
203 203 x = repo[x].rev()
204 204 if x == -1 and len(subset) == len(repo):
205 205 return [-1]
206 206 if len(subset) == len(repo) or x in subset:
207 207 return [x]
208 208 return []
209 209
210 210 def symbolset(repo, subset, x):
211 211 if x in symbols:
212 212 raise error.ParseError(_("can't use %s here") % x)
213 213 return stringset(repo, subset, x)
214 214
215 215 def rangeset(repo, subset, x, y):
216 216 m = getset(repo, subset, x)
217 217 if not m:
218 m = getset(repo, range(len(repo)), x)
218 m = getset(repo, list(repo), x)
219 219
220 220 n = getset(repo, subset, y)
221 221 if not n:
222 n = getset(repo, range(len(repo)), y)
222 n = getset(repo, list(repo), y)
223 223
224 224 if not m or not n:
225 225 return []
226 226 m, n = m[0], n[-1]
227 227
228 228 if m < n:
229 229 r = range(m, n + 1)
230 230 else:
231 231 r = range(m, n - 1, -1)
232 232 s = set(subset)
233 233 return [x for x in r if x in s]
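
# Hand-traced sketch (illustrative only): with the whole repo as subset,
# "1:3" resolves to [1, 2, 3] and "3:1" to [3, 2, 1] -- the endpoints come
# from m[0] and n[-1], and the descending branch of the range preserves
# the requested order before filtering against subset.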
234 234
235 235 def dagrange(repo, subset, x, y):
236 236 if subset:
237 r = range(len(repo))
237 r = list(repo)
238 238 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
239 239 s = set(subset)
240 240 return [r for r in xs if r in s]
241 241 return []
242 242
243 243 def andset(repo, subset, x, y):
244 244 return getset(repo, getset(repo, subset, x), y)
245 245
246 246 def orset(repo, subset, x, y):
247 247 xl = getset(repo, subset, x)
248 248 s = set(xl)
249 249 yl = getset(repo, [r for r in subset if r not in s], y)
250 250 return xl + yl
251 251
252 252 def notset(repo, subset, x):
253 253 s = set(getset(repo, subset, x))
254 254 return [r for r in subset if r not in s]
255 255
256 256 def listset(repo, subset, a, b):
257 257 raise error.ParseError(_("can't use a list in this context"))
258 258
259 259 def func(repo, subset, a, b):
260 260 if a[0] == 'symbol' and a[1] in symbols:
261 261 return symbols[a[1]](repo, subset, b)
262 262 raise error.ParseError(_("not a function: %s") % a[1])
263 263
264 264 # functions
265 265
266 266 def adds(repo, subset, x):
267 267 """``adds(pattern)``
268 268 Changesets that add a file matching pattern.
269 269 """
270 270 # i18n: "adds" is a keyword
271 271 pat = getstring(x, _("adds requires a pattern"))
272 272 return checkstatus(repo, subset, pat, 1)
273 273
274 274 def ancestor(repo, subset, x):
275 275 """``ancestor(single, single)``
276 276 Greatest common ancestor of the two changesets.
277 277 """
278 278 # i18n: "ancestor" is a keyword
279 279 l = getargs(x, 2, 2, _("ancestor requires two arguments"))
280 r = range(len(repo))
280 r = list(repo)
281 281 a = getset(repo, r, l[0])
282 282 b = getset(repo, r, l[1])
283 283 if len(a) != 1 or len(b) != 1:
284 284 # i18n: "ancestor" is a keyword
285 285 raise error.ParseError(_("ancestor arguments must be single revisions"))
286 286 an = [repo[a[0]].ancestor(repo[b[0]]).rev()]
287 287
288 288 return [r for r in an if r in subset]
289 289
290 290 def _ancestors(repo, subset, x, followfirst=False):
291 args = getset(repo, range(len(repo)), x)
291 args = getset(repo, list(repo), x)
292 292 if not args:
293 293 return []
294 294 s = set(_revancestors(repo, args, followfirst)) | set(args)
295 295 return [r for r in subset if r in s]
296 296
297 297 def ancestors(repo, subset, x):
298 298 """``ancestors(set)``
299 299 Changesets that are ancestors of a changeset in set.
300 300 """
301 301 return _ancestors(repo, subset, x)
302 302
303 303 def _firstancestors(repo, subset, x):
304 304 # ``_firstancestors(set)``
305 305 # Like ``ancestors(set)`` but follows only the first parents.
306 306 return _ancestors(repo, subset, x, followfirst=True)
307 307
308 308 def ancestorspec(repo, subset, x, n):
309 309 """``set~n``
310 310 Changesets that are the Nth ancestor (first parents only) of a changeset
311 311 in set.
312 312 """
313 313 try:
314 314 n = int(n[1])
315 315 except (TypeError, ValueError):
316 316 raise error.ParseError(_("~ expects a number"))
317 317 ps = set()
318 318 cl = repo.changelog
319 319 for r in getset(repo, subset, x):
320 320 for i in range(n):
321 321 r = cl.parentrevs(r)[0]
322 322 ps.add(r)
323 323 return [r for r in subset if r in ps]
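
# Illustrative note: "set~n" follows first parents only, so "tip~2" is
# equivalent to "p1(p1(tip))" -- two iterations of cl.parentrevs(r)[0].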
324 324
325 325 def author(repo, subset, x):
326 326 """``author(string)``
327 327 Alias for ``user(string)``.
328 328 """
329 329 # i18n: "author" is a keyword
330 330 n = encoding.lower(getstring(x, _("author requires a string")))
331 331 kind, pattern, matcher = _substringmatcher(n)
332 332 return [r for r in subset if matcher(encoding.lower(repo[r].user()))]
333 333
334 334 def bisect(repo, subset, x):
335 335 """``bisect(string)``
336 336 Changesets marked in the specified bisect status:
337 337
338 338 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
339 339 - ``goods``, ``bads`` : csets topologically good/bad
340 340 - ``range`` : csets taking part in the bisection
341 341 - ``pruned`` : csets that are goods, bads or skipped
342 342 - ``untested`` : csets whose fate is yet unknown
343 343 - ``ignored`` : csets ignored due to DAG topology
344 344 - ``current`` : the cset currently being bisected
345 345 """
346 346 # i18n: "bisect" is a keyword
347 347 status = getstring(x, _("bisect requires a string")).lower()
348 348 state = set(hbisect.get(repo, status))
349 349 return [r for r in subset if r in state]
350 350
351 351 # Backward-compatibility
352 352 # - no help entry so that we do not advertise it any more
353 353 def bisected(repo, subset, x):
354 354 return bisect(repo, subset, x)
355 355
356 356 def bookmark(repo, subset, x):
357 357 """``bookmark([name])``
358 358 The named bookmark or all bookmarks.
359 359
360 360 If `name` starts with `re:`, the remainder of the name is treated as
361 361 a regular expression. To match a bookmark that actually starts with `re:`,
362 362 use the prefix `literal:`.
363 363 """
364 364 # i18n: "bookmark" is a keyword
365 365 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
366 366 if args:
367 367 bm = getstring(args[0],
368 368 # i18n: "bookmark" is a keyword
369 369 _('the argument to bookmark must be a string'))
370 370 kind, pattern, matcher = _stringmatcher(bm)
371 371 if kind == 'literal':
372 372 bmrev = bookmarksmod.listbookmarks(repo).get(bm, None)
373 373 if not bmrev:
374 374 raise util.Abort(_("bookmark '%s' does not exist") % bm)
375 375 bmrev = repo[bmrev].rev()
376 376 return [r for r in subset if r == bmrev]
377 377 else:
378 378 matchrevs = set()
379 379 for name, bmrev in bookmarksmod.listbookmarks(repo).iteritems():
380 380 if matcher(name):
381 381 matchrevs.add(bmrev)
382 382 if not matchrevs:
383 383 raise util.Abort(_("no bookmarks exist that match '%s'")
384 384 % pattern)
385 385 bmrevs = set()
386 386 for bmrev in matchrevs:
387 387 bmrevs.add(repo[bmrev].rev())
388 388 return [r for r in subset if r in bmrevs]
389 389
390 390 bms = set([repo[r].rev()
391 391 for r in bookmarksmod.listbookmarks(repo).values()])
392 392 return [r for r in subset if r in bms]
393 393
394 394 def branch(repo, subset, x):
395 395 """``branch(string or set)``
396 396 All changesets belonging to the given branch or the branches of the given
397 397 changesets.
398 398
399 399 If `string` starts with `re:`, the remainder of the name is treated as
400 400 a regular expression. To match a branch that actually starts with `re:`,
401 401 use the prefix `literal:`.
402 402 """
403 403 try:
404 404 b = getstring(x, '')
405 405 except error.ParseError:
406 406 # not a string, but another revspec, e.g. tip()
407 407 pass
408 408 else:
409 409 kind, pattern, matcher = _stringmatcher(b)
410 410 if kind == 'literal':
411 411 # note: falls through to the revspec case if no branch with
412 412 # this name exists
413 413 if pattern in repo.branchmap():
414 414 return [r for r in subset if matcher(repo[r].branch())]
415 415 else:
416 416 return [r for r in subset if matcher(repo[r].branch())]
417 417
418 s = getset(repo, range(len(repo)), x)
418 s = getset(repo, list(repo), x)
419 419 b = set()
420 420 for r in s:
421 421 b.add(repo[r].branch())
422 422 s = set(s)
423 423 return [r for r in subset if r in s or repo[r].branch() in b]
424 424
425 425 def checkstatus(repo, subset, pat, field):
426 426 m = None
427 427 s = []
428 428 hasset = matchmod.patkind(pat) == 'set'
429 429 fname = None
430 430 for r in subset:
431 431 c = repo[r]
432 432 if not m or hasset:
433 433 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
434 434 if not m.anypats() and len(m.files()) == 1:
435 435 fname = m.files()[0]
436 436 if fname is not None:
437 437 if fname not in c.files():
438 438 continue
439 439 else:
440 440 for f in c.files():
441 441 if m(f):
442 442 break
443 443 else:
444 444 continue
445 445 files = repo.status(c.p1().node(), c.node())[field]
446 446 if fname is not None:
447 447 if fname in files:
448 448 s.append(r)
449 449 else:
450 450 for f in files:
451 451 if m(f):
452 452 s.append(r)
453 453 break
454 454 return s
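
# Note on the `field` argument above: it indexes the tuple returned by
# repo.status() (modified, added, removed, deleted, ...), which is why
# modifies() passes 0, adds() passes 1 and removes() passes 2.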
455 455
456 456 def _children(repo, narrow, parentset):
457 457 cs = set()
458 458 pr = repo.changelog.parentrevs
459 459 for r in narrow:
460 460 for p in pr(r):
461 461 if p in parentset:
462 462 cs.add(r)
463 463 return cs
464 464
465 465 def children(repo, subset, x):
466 466 """``children(set)``
467 467 Child changesets of changesets in set.
468 468 """
469 s = set(getset(repo, range(len(repo)), x))
469 s = set(getset(repo, list(repo), x))
470 470 cs = _children(repo, subset, s)
471 471 return [r for r in subset if r in cs]
472 472
473 473 def closed(repo, subset, x):
474 474 """``closed()``
475 475 Changeset is closed.
476 476 """
477 477 # i18n: "closed" is a keyword
478 478 getargs(x, 0, 0, _("closed takes no arguments"))
479 479 return [r for r in subset if repo[r].closesbranch()]
480 480
481 481 def contains(repo, subset, x):
482 482 """``contains(pattern)``
483 483 Revision contains a file matching pattern. See :hg:`help patterns`
484 484 for information about file patterns.
485 485 """
486 486 # i18n: "contains" is a keyword
487 487 pat = getstring(x, _("contains requires a pattern"))
488 488 m = None
489 489 s = []
490 490 if not matchmod.patkind(pat):
491 491 for r in subset:
492 492 if pat in repo[r]:
493 493 s.append(r)
494 494 else:
495 495 for r in subset:
496 496 c = repo[r]
497 497 if not m or matchmod.patkind(pat) == 'set':
498 498 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
499 499 for f in c.manifest():
500 500 if m(f):
501 501 s.append(r)
502 502 break
503 503 return s
504 504
505 505 def converted(repo, subset, x):
506 506 """``converted([id])``
507 507 Changesets converted from the given identifier in the old repository if
508 508 present, or all converted changesets if no identifier is specified.
509 509 """
510 510
511 511 # There is exactly no chance of resolving the revision, so do a simple
512 512 # string compare and hope for the best
513 513
514 514 rev = None
515 515 # i18n: "converted" is a keyword
516 516 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
517 517 if l:
518 518 # i18n: "converted" is a keyword
519 519 rev = getstring(l[0], _('converted requires a revision'))
520 520
521 521 def _matchvalue(r):
522 522 source = repo[r].extra().get('convert_revision', None)
523 523 return source is not None and (rev is None or source.startswith(rev))
524 524
525 525 return [r for r in subset if _matchvalue(r)]
526 526
527 527 def date(repo, subset, x):
528 528 """``date(interval)``
529 529 Changesets within the interval, see :hg:`help dates`.
530 530 """
531 531 # i18n: "date" is a keyword
532 532 ds = getstring(x, _("date requires a string"))
533 533 dm = util.matchdate(ds)
534 534 return [r for r in subset if dm(repo[r].date()[0])]
535 535
536 536 def desc(repo, subset, x):
537 537 """``desc(string)``
538 538 Search commit message for string. The match is case-insensitive.
539 539 """
540 540 # i18n: "desc" is a keyword
541 541 ds = encoding.lower(getstring(x, _("desc requires a string")))
542 542 l = []
543 543 for r in subset:
544 544 c = repo[r]
545 545 if ds in encoding.lower(c.description()):
546 546 l.append(r)
547 547 return l
548 548
549 549 def _descendants(repo, subset, x, followfirst=False):
550 args = getset(repo, range(len(repo)), x)
550 args = getset(repo, list(repo), x)
551 551 if not args:
552 552 return []
553 553 s = set(_revdescendants(repo, args, followfirst)) | set(args)
554 554 return [r for r in subset if r in s]
555 555
556 556 def descendants(repo, subset, x):
557 557 """``descendants(set)``
558 558 Changesets which are descendants of changesets in set.
559 559 """
560 560 return _descendants(repo, subset, x)
561 561
562 562 def _firstdescendants(repo, subset, x):
563 563 # ``_firstdescendants(set)``
564 564 # Like ``descendants(set)`` but follows only the first parents.
565 565 return _descendants(repo, subset, x, followfirst=True)
566 566
567 567 def destination(repo, subset, x):
568 568 """``destination([set])``
569 569 Changesets that were created by a graft, transplant or rebase operation,
570 570 with the given revisions specified as the source. Omitting the optional set
571 571 is the same as passing all().
572 572 """
573 573 if x is not None:
574 args = set(getset(repo, range(len(repo)), x))
574 args = set(getset(repo, list(repo), x))
575 575 else:
576 args = set(getall(repo, range(len(repo)), x))
576 args = set(getall(repo, list(repo), x))
577 577
578 578 dests = set()
579 579
580 580 # subset contains all of the possible destinations that can be returned, so
581 581 # iterate over them and see if their source(s) were provided in the args.
582 582 # Even if the immediate src of r is not in the args, src's source (or
583 583 # further back) may be. Scanning back further than the immediate src allows
584 584 # transitive transplants and rebases to yield the same results as transitive
585 585 # grafts.
586 586 for r in subset:
587 587 src = _getrevsource(repo, r)
588 588 lineage = None
589 589
590 590 while src is not None:
591 591 if lineage is None:
592 592 lineage = list()
593 593
594 594 lineage.append(r)
595 595
596 596 # The visited lineage is a match if the current source is in the arg
597 597 # set. Since every candidate dest is visited by way of iterating
598 598 # subset, any dests further back in the lineage will be tested by a
599 599 # different iteration over subset. Likewise, if the src was already
600 600 # selected, the current lineage can be selected without going back
601 601 # further.
602 602 if src in args or src in dests:
603 603 dests.update(lineage)
604 604 break
605 605
606 606 r = src
607 607 src = _getrevsource(repo, r)
608 608
609 609 return [r for r in subset if r in dests]
610 610
611 611 def draft(repo, subset, x):
612 612 """``draft()``
613 613 Changeset in draft phase."""
614 614 # i18n: "draft" is a keyword
615 615 getargs(x, 0, 0, _("draft takes no arguments"))
616 616 pc = repo._phasecache
617 617 return [r for r in subset if pc.phase(repo, r) == phases.draft]
618 618
619 619 def extinct(repo, subset, x):
620 620 """``extinct()``
621 621 Obsolete changesets with obsolete descendants only.
622 622 """
623 623 # i18n: "extinct" is a keyword
624 624 getargs(x, 0, 0, _("extinct takes no arguments"))
625 625 extincts = obsmod.getobscache(repo, 'extinct')
626 626 return [r for r in subset if r in extincts]
627 627
628 628 def extra(repo, subset, x):
629 629 """``extra(label, [value])``
630 630 Changesets with the given label in the extra metadata, with the given
631 631 optional value.
632 632
633 633 If `value` starts with `re:`, the remainder of the value is treated as
634 634 a regular expression. To match a value that actually starts with `re:`,
635 635 use the prefix `literal:`.
636 636 """
637 637
638 638 # i18n: "extra" is a keyword
639 639 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
640 640 # i18n: "extra" is a keyword
641 641 label = getstring(l[0], _('first argument to extra must be a string'))
642 642 value = None
643 643
644 644 if len(l) > 1:
645 645 # i18n: "extra" is a keyword
646 646 value = getstring(l[1], _('second argument to extra must be a string'))
647 647 kind, value, matcher = _stringmatcher(value)
648 648
649 649 def _matchvalue(r):
650 650 extra = repo[r].extra()
651 651 return label in extra and (value is None or matcher(extra[label]))
652 652
653 653 return [r for r in subset if _matchvalue(r)]
654 654
655 655 def filelog(repo, subset, x):
656 656 """``filelog(pattern)``
657 657 Changesets connected to the specified filelog.
658 658
659 659 For performance reasons, ``filelog()`` does not show every changeset
660 660 that affects the requested file(s). See :hg:`help log` for details. For
661 661 a slower, more accurate result, use ``file()``.
662 662 """
663 663
664 664 # i18n: "filelog" is a keyword
665 665 pat = getstring(x, _("filelog requires a pattern"))
666 666 m = matchmod.match(repo.root, repo.getcwd(), [pat], default='relpath',
667 667 ctx=repo[None])
668 668 s = set()
669 669
670 670 if not matchmod.patkind(pat):
671 671 for f in m.files():
672 672 fl = repo.file(f)
673 673 for fr in fl:
674 674 s.add(fl.linkrev(fr))
675 675 else:
676 676 for f in repo[None]:
677 677 if m(f):
678 678 fl = repo.file(f)
679 679 for fr in fl:
680 680 s.add(fl.linkrev(fr))
681 681
682 682 return [r for r in subset if r in s]
683 683
684 684 def first(repo, subset, x):
685 685 """``first(set, [n])``
686 686 An alias for limit().
687 687 """
688 688 return limit(repo, subset, x)
689 689
690 690 def _follow(repo, subset, x, name, followfirst=False):
691 691 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
692 692 c = repo['.']
693 693 if l:
694 694 x = getstring(l[0], _("%s expected a filename") % name)
695 695 if x in c:
696 696 cx = c[x]
697 697 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
698 698 # include the revision responsible for the most recent version
699 699 s.add(cx.linkrev())
700 700 else:
701 701 return []
702 702 else:
703 703 s = set(_revancestors(repo, [c.rev()], followfirst)) | set([c.rev()])
704 704
705 705 return [r for r in subset if r in s]
706 706
707 707 def follow(repo, subset, x):
708 708 """``follow([file])``
709 709 An alias for ``::.`` (ancestors of the working copy's first parent).
710 710 If a filename is specified, the history of the given file is followed,
711 711 including copies.
712 712 """
713 713 return _follow(repo, subset, x, 'follow')
714 714
715 715 def _followfirst(repo, subset, x):
716 716 # ``followfirst([file])``
717 717 # Like ``follow([file])`` but follows only the first parent of
718 718 # every revision or file revision.
719 719 return _follow(repo, subset, x, '_followfirst', followfirst=True)
720 720
721 721 def getall(repo, subset, x):
722 722 """``all()``
723 723 All changesets, the same as ``0:tip``.
724 724 """
725 725 # i18n: "all" is a keyword
726 726 getargs(x, 0, 0, _("all takes no arguments"))
727 727 return subset
728 728
729 729 def grep(repo, subset, x):
730 730 """``grep(regex)``
731 731 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
732 732 to ensure special escape characters are handled correctly. Unlike
733 733 ``keyword(string)``, the match is case-sensitive.
734 734 """
735 735 try:
736 736 # i18n: "grep" is a keyword
737 737 gr = re.compile(getstring(x, _("grep requires a string")))
738 738 except re.error, e:
739 739 raise error.ParseError(_('invalid match pattern: %s') % e)
740 740 l = []
741 741 for r in subset:
742 742 c = repo[r]
743 743 for e in c.files() + [c.user(), c.description()]:
744 744 if gr.search(e):
745 745 l.append(r)
746 746 break
747 747 return l
748 748
749 749 def _matchfiles(repo, subset, x):
750 750 # _matchfiles takes a revset list of prefixed arguments:
751 751 #
752 752 # [p:foo, i:bar, x:baz]
753 753 #
754 754 # builds a match object from them and filters subset. Allowed
755 755 # prefixes are 'p:' for regular patterns, 'i:' for include
756 756 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
757 757 # a revision identifier, or the empty string to reference the
758 758 # working directory, from which the match object is
759 759 # initialized. Use 'd:' to set the default matching mode, default
760 760 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
761 761
762 762 # i18n: "_matchfiles" is a keyword
763 763 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
764 764 pats, inc, exc = [], [], []
765 765 hasset = False
766 766 rev, default = None, None
767 767 for arg in l:
768 768 # i18n: "_matchfiles" is a keyword
769 769 s = getstring(arg, _("_matchfiles requires string arguments"))
770 770 prefix, value = s[:2], s[2:]
771 771 if prefix == 'p:':
772 772 pats.append(value)
773 773 elif prefix == 'i:':
774 774 inc.append(value)
775 775 elif prefix == 'x:':
776 776 exc.append(value)
777 777 elif prefix == 'r:':
778 778 if rev is not None:
779 779 # i18n: "_matchfiles" is a keyword
780 780 raise error.ParseError(_('_matchfiles expected at most one '
781 781 'revision'))
782 782 rev = value
783 783 elif prefix == 'd:':
784 784 if default is not None:
785 785 # i18n: "_matchfiles" is a keyword
786 786 raise error.ParseError(_('_matchfiles expected at most one '
787 787 'default mode'))
788 788 default = value
789 789 else:
790 790 # i18n: "_matchfiles" is a keyword
791 791 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
792 792 if not hasset and matchmod.patkind(value) == 'set':
793 793 hasset = True
794 794 if not default:
795 795 default = 'glob'
796 796 m = None
797 797 s = []
798 798 for r in subset:
799 799 c = repo[r]
800 800 if not m or (hasset and rev is None):
801 801 ctx = c
802 802 if rev is not None:
803 803 ctx = repo[rev or None]
804 804 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
805 805 exclude=exc, ctx=ctx, default=default)
806 806 for f in c.files():
807 807 if m(f):
808 808 s.append(r)
809 809 break
810 810 return s
811 811
812 812 def hasfile(repo, subset, x):
813 813 """``file(pattern)``
814 814 Changesets affecting files matched by pattern.
815 815
816 816 For a faster but less accurate result, consider using ``filelog()``
817 817 instead.
818 818 """
819 819 # i18n: "file" is a keyword
820 820 pat = getstring(x, _("file requires a pattern"))
821 821 return _matchfiles(repo, subset, ('string', 'p:' + pat))
822 822
823 823 def head(repo, subset, x):
824 824 """``head()``
825 825 Changeset is a named branch head.
826 826 """
827 827 # i18n: "head" is a keyword
828 828 getargs(x, 0, 0, _("head takes no arguments"))
829 829 hs = set()
830 830 for b, ls in repo.branchmap().iteritems():
831 831 hs.update(repo[h].rev() for h in ls)
832 832 return [r for r in subset if r in hs]
833 833
834 834 def heads(repo, subset, x):
835 835 """``heads(set)``
836 836 Members of set with no children in set.
837 837 """
838 838 s = getset(repo, subset, x)
839 839 ps = set(parents(repo, subset, x))
840 840 return [r for r in s if r not in ps]
841 841
842 842 def hidden(repo, subset, x):
843 843 """``hidden()``
844 844 Hidden changesets.
845 845 """
846 846 # i18n: "hidden" is a keyword
847 847 getargs(x, 0, 0, _("hidden takes no arguments"))
848 848 return [r for r in subset if r in repo.hiddenrevs]
849 849
850 850 def keyword(repo, subset, x):
851 851 """``keyword(string)``
852 852 Search commit message, user name, and names of changed files for
853 853 string. The match is case-insensitive.
854 854 """
855 855 # i18n: "keyword" is a keyword
856 856 kw = encoding.lower(getstring(x, _("keyword requires a string")))
857 857 l = []
858 858 for r in subset:
859 859 c = repo[r]
860 860 t = " ".join(c.files() + [c.user(), c.description()])
861 861 if kw in encoding.lower(t):
862 862 l.append(r)
863 863 return l
864 864
865 865 def limit(repo, subset, x):
866 866 """``limit(set, [n])``
867 867 First n members of set, defaulting to 1.
868 868 """
869 869 # i18n: "limit" is a keyword
870 870 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
871 871 try:
872 872 lim = 1
873 873 if len(l) == 2:
874 874 # i18n: "limit" is a keyword
875 875 lim = int(getstring(l[1], _("limit requires a number")))
876 876 except (TypeError, ValueError):
877 877 # i18n: "limit" is a keyword
878 878 raise error.ParseError(_("limit expects a number"))
879 879 ss = set(subset)
880 os = getset(repo, range(len(repo)), l[0])[:lim]
880 os = getset(repo, list(repo), l[0])[:lim]
881 881 return [r for r in os if r in ss]
882 882
883 883 def last(repo, subset, x):
884 884 """``last(set, [n])``
885 885 Last n members of set, defaulting to 1.
886 886 """
887 887 # i18n: "last" is a keyword
888 888 l = getargs(x, 1, 2, _("last requires one or two arguments"))
889 889 try:
890 890 lim = 1
891 891 if len(l) == 2:
892 892 # i18n: "last" is a keyword
893 893 lim = int(getstring(l[1], _("last requires a number")))
894 894 except (TypeError, ValueError):
895 895 # i18n: "last" is a keyword
896 896 raise error.ParseError(_("last expects a number"))
897 897 ss = set(subset)
898 os = getset(repo, range(len(repo)), l[0])[-lim:]
898 os = getset(repo, list(repo), l[0])[-lim:]
899 899 return [r for r in os if r in ss]
900 900
901 901 def maxrev(repo, subset, x):
902 902 """``max(set)``
903 903 Changeset with highest revision number in set.
904 904 """
905 os = getset(repo, range(len(repo)), x)
905 os = getset(repo, list(repo), x)
906 906 if os:
907 907 m = max(os)
908 908 if m in subset:
909 909 return [m]
910 910 return []
911 911
912 912 def merge(repo, subset, x):
913 913 """``merge()``
914 914 Changeset is a merge changeset.
915 915 """
916 916 # i18n: "merge" is a keyword
917 917 getargs(x, 0, 0, _("merge takes no arguments"))
918 918 cl = repo.changelog
919 919 return [r for r in subset if cl.parentrevs(r)[1] != -1]
920 920
921 921 def minrev(repo, subset, x):
922 922 """``min(set)``
923 923 Changeset with lowest revision number in set.
924 924 """
925 os = getset(repo, range(len(repo)), x)
925 os = getset(repo, list(repo), x)
926 926 if os:
927 927 m = min(os)
928 928 if m in subset:
929 929 return [m]
930 930 return []
931 931
932 932 def modifies(repo, subset, x):
933 933 """``modifies(pattern)``
934 934 Changesets modifying files matched by pattern.
935 935 """
936 936 # i18n: "modifies" is a keyword
937 937 pat = getstring(x, _("modifies requires a pattern"))
938 938 return checkstatus(repo, subset, pat, 0)
939 939
940 940 def node_(repo, subset, x):
941 941 """``id(string)``
942 942 Revision unambiguously specified by the given hex string prefix.
943 943 """
944 944 # i18n: "id" is a keyword
945 945 l = getargs(x, 1, 1, _("id requires one argument"))
946 946 # i18n: "id" is a keyword
947 947 n = getstring(l[0], _("id requires a string"))
948 948 if len(n) == 40:
949 949 rn = repo[n].rev()
950 950 else:
951 951 rn = None
952 952 pm = repo.changelog._partialmatch(n)
953 953 if pm is not None:
954 954 rn = repo.changelog.rev(pm)
955 955
956 956 return [r for r in subset if r == rn]
957 957
958 958 def obsolete(repo, subset, x):
959 959 """``obsolete()``
960 960 Mutable changeset with a newer version."""
961 961 # i18n: "obsolete" is a keyword
962 962 getargs(x, 0, 0, _("obsolete takes no arguments"))
963 963 obsoletes = obsmod.getobscache(repo, 'obsolete')
964 964 return [r for r in subset if r in obsoletes]
965 965
966 966 def origin(repo, subset, x):
967 967 """``origin([set])``
968 968 Changesets that were specified as a source for the grafts, transplants or
969 969 rebases that created the given revisions. Omitting the optional set is the
970 970 same as passing all(). If a changeset created by these operations is itself
971 971 specified as a source for one of these operations, only the source changeset
972 972 for the first operation is selected.
973 973 """
974 974 if x is not None:
975 args = set(getset(repo, range(len(repo)), x))
975 args = set(getset(repo, list(repo), x))
976 976 else:
977 args = set(getall(repo, range(len(repo)), x))
977 args = set(getall(repo, list(repo), x))
978 978
979 979 def _firstsrc(rev):
980 980 src = _getrevsource(repo, rev)
981 981 if src is None:
982 982 return None
983 983
984 984 while True:
985 985 prev = _getrevsource(repo, src)
986 986
987 987 if prev is None:
988 988 return src
989 989 src = prev
990 990
991 991 o = set([_firstsrc(r) for r in args])
992 992 return [r for r in subset if r in o]
993 993
994 994 def outgoing(repo, subset, x):
995 995 """``outgoing([path])``
996 996 Changesets not found in the specified destination repository, or the
997 997 default push location.
998 998 """
999 999 import hg # avoid start-up nasties
1000 1000 # i18n: "outgoing" is a keyword
1001 1001 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1002 1002 # i18n: "outgoing" is a keyword
1003 1003 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1004 1004 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1005 1005 dest, branches = hg.parseurl(dest)
1006 1006 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1007 1007 if revs:
1008 1008 revs = [repo.lookup(rev) for rev in revs]
1009 1009 other = hg.peer(repo, {}, dest)
1010 1010 repo.ui.pushbuffer()
1011 1011 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1012 1012 repo.ui.popbuffer()
1013 1013 cl = repo.changelog
1014 1014 o = set([cl.rev(r) for r in outgoing.missing])
1015 1015 return [r for r in subset if r in o]
1016 1016
1017 1017 def p1(repo, subset, x):
1018 1018 """``p1([set])``
1019 1019 First parent of changesets in set, or the working directory.
1020 1020 """
1021 1021 if x is None:
1022 1022 p = repo[x].p1().rev()
1023 1023 return [r for r in subset if r == p]
1024 1024
1025 1025 ps = set()
1026 1026 cl = repo.changelog
1027 for r in getset(repo, range(len(repo)), x):
1027 for r in getset(repo, list(repo), x):
1028 1028 ps.add(cl.parentrevs(r)[0])
1029 1029 return [r for r in subset if r in ps]
1030 1030
1031 1031 def p2(repo, subset, x):
1032 1032 """``p2([set])``
1033 1033 Second parent of changesets in set, or the working directory.
1034 1034 """
1035 1035 if x is None:
1036 1036 ps = repo[x].parents()
1037 1037 try:
1038 1038 p = ps[1].rev()
1039 1039 return [r for r in subset if r == p]
1040 1040 except IndexError:
1041 1041 return []
1042 1042
1043 1043 ps = set()
1044 1044 cl = repo.changelog
1045 for r in getset(repo, range(len(repo)), x):
1045 for r in getset(repo, list(repo), x):
1046 1046 ps.add(cl.parentrevs(r)[1])
1047 1047 return [r for r in subset if r in ps]
1048 1048
1049 1049 def parents(repo, subset, x):
1050 1050 """``parents([set])``
1051 1051 The set of all parents for all changesets in set, or the working directory.
1052 1052 """
1053 1053 if x is None:
1054 1054 ps = tuple(p.rev() for p in repo[x].parents())
1055 1055 return [r for r in subset if r in ps]
1056 1056
1057 1057 ps = set()
1058 1058 cl = repo.changelog
1059 for r in getset(repo, range(len(repo)), x):
1059 for r in getset(repo, list(repo), x):
1060 1060 ps.update(cl.parentrevs(r))
1061 1061 return [r for r in subset if r in ps]
1062 1062
1063 1063 def parentspec(repo, subset, x, n):
1064 1064 """``set^0``
1065 1065 The set.
1066 1066 ``set^1`` (or ``set^``), ``set^2``
1067 1067 First or second parent, respectively, of all changesets in set.
1068 1068 """
1069 1069 try:
1070 1070 n = int(n[1])
1071 1071 if n not in (0, 1, 2):
1072 1072 raise ValueError
1073 1073 except (TypeError, ValueError):
1074 1074 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1075 1075 ps = set()
1076 1076 cl = repo.changelog
1077 1077 for r in getset(repo, subset, x):
1078 1078 if n == 0:
1079 1079 ps.add(r)
1080 1080 elif n == 1:
1081 1081 ps.add(cl.parentrevs(r)[0])
1082 1082 elif n == 2:
1083 1083 parents = cl.parentrevs(r)
1084 1084 if len(parents) > 1:
1085 1085 ps.add(parents[1])
1086 1086 return [r for r in subset if r in ps]
1087 1087
1088 1088 def present(repo, subset, x):
1089 1089 """``present(set)``
1090 1090 An empty set, if any revision in set isn't found; otherwise,
1091 1091 all revisions in set.
1092 1092
1093 1093 If any of the specified revisions is not present in the local repository,
1094 1094 the query is normally aborted. But this predicate allows the query
1095 1095 to continue even in such cases.
1096 1096 """
1097 1097 try:
1098 1098 return getset(repo, subset, x)
1099 1099 except error.RepoLookupError:
1100 1100 return []
1101 1101
1102 1102 def public(repo, subset, x):
1103 1103 """``public()``
1104 1104 Changeset in public phase."""
1105 1105 # i18n: "public" is a keyword
1106 1106 getargs(x, 0, 0, _("public takes no arguments"))
1107 1107 pc = repo._phasecache
1108 1108 return [r for r in subset if pc.phase(repo, r) == phases.public]
1109 1109
1110 1110 def remote(repo, subset, x):
1111 1111 """``remote([id [,path]])``
1112 1112 Local revision that corresponds to the given identifier in a
1113 1113 remote repository, if present. Here, the '.' identifier is a
1114 1114 synonym for the current local branch.
1115 1115 """
1116 1116
1117 1117 import hg # avoid start-up nasties
1118 1118 # i18n: "remote" is a keyword
1119 1119 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1120 1120
1121 1121 q = '.'
1122 1122 if len(l) > 0:
1123 1123 # i18n: "remote" is a keyword
1124 1124 q = getstring(l[0], _("remote requires a string id"))
1125 1125 if q == '.':
1126 1126 q = repo['.'].branch()
1127 1127
1128 1128 dest = ''
1129 1129 if len(l) > 1:
1130 1130 # i18n: "remote" is a keyword
1131 1131 dest = getstring(l[1], _("remote requires a repository path"))
1132 1132 dest = repo.ui.expandpath(dest or 'default')
1133 1133 dest, branches = hg.parseurl(dest)
1134 1134 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1135 1135 if revs:
1136 1136 revs = [repo.lookup(rev) for rev in revs]
1137 1137 other = hg.peer(repo, {}, dest)
1138 1138 n = other.lookup(q)
1139 1139 if n in repo:
1140 1140 r = repo[n].rev()
1141 1141 if r in subset:
1142 1142 return [r]
1143 1143 return []
1144 1144
1145 1145 def removes(repo, subset, x):
1146 1146 """``removes(pattern)``
1147 1147 Changesets which remove files matching pattern.
1148 1148 """
1149 1149 # i18n: "removes" is a keyword
1150 1150 pat = getstring(x, _("removes requires a pattern"))
1151 1151 return checkstatus(repo, subset, pat, 2)
1152 1152
1153 1153 def rev(repo, subset, x):
1154 1154 """``rev(number)``
1155 1155 Revision with the given numeric identifier.
1156 1156 """
1157 1157 # i18n: "rev" is a keyword
1158 1158 l = getargs(x, 1, 1, _("rev requires one argument"))
1159 1159 try:
1160 1160 # i18n: "rev" is a keyword
1161 1161 l = int(getstring(l[0], _("rev requires a number")))
1162 1162 except (TypeError, ValueError):
1163 1163 # i18n: "rev" is a keyword
1164 1164 raise error.ParseError(_("rev expects a number"))
1165 1165 return [r for r in subset if r == l]
1166 1166
1167 1167 def matching(repo, subset, x):
1168 1168 """``matching(revision [, field])``
1169 1169 Changesets in which a given set of fields match the set of fields in the
1170 1170 selected revision or set.
1171 1171
1172 1172 To match more than one field pass the list of fields to match separated
1173 1173 by spaces (e.g. ``author description``).
1174 1174
1175 1175 Valid fields are most regular revision fields and some special fields.
1176 1176
1177 1177 Regular revision fields are ``description``, ``author``, ``branch``,
1178 1178 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1179 1179 and ``diff``.
1180 1180 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1181 1181 contents of the revision. Two revisions matching their ``diff`` will
1182 1182 also match their ``files``.
1183 1183
1184 1184 Special fields are ``summary`` and ``metadata``:
1185 1185 ``summary`` matches the first line of the description.
1186 1186 ``metadata`` is equivalent to matching ``description user date``
1187 1187 (i.e. it matches the main metadata fields).
1188 1188
1189 1189 ``metadata`` is the default field which is used when no fields are
1190 1190 specified. You can match more than one field at a time.
1191 1191 """
1192 1192 # i18n: "matching" is a keyword
1193 1193 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1194 1194
1195 1195 revs = getset(repo, xrange(len(repo)), l[0])
1196 1196
1197 1197 fieldlist = ['metadata']
1198 1198 if len(l) > 1:
1199 1199 fieldlist = getstring(l[1],
1200 1200 # i18n: "matching" is a keyword
1201 1201 _("matching requires a string "
1202 1202 "as its second argument")).split()
1203 1203
1204 1204 # Make sure that there are no repeated fields,
1205 1205 # expand the 'special' 'metadata' field type
1206 1206 # and check the 'files' whenever we check the 'diff'
1207 1207 fields = []
1208 1208 for field in fieldlist:
1209 1209 if field == 'metadata':
1210 1210 fields += ['user', 'description', 'date']
1211 1211 elif field == 'diff':
1212 1212 # a revision matching the diff must also match the files
1213 1213 # since matching the diff is very costly, make sure to
1214 1214 # also match the files first
1215 1215 fields += ['files', 'diff']
1216 1216 else:
1217 1217 if field == 'author':
1218 1218 field = 'user'
1219 1219 fields.append(field)
1220 1220 fields = set(fields)
1221 1221 if 'summary' in fields and 'description' in fields:
1222 1222 # If a revision matches its description it also matches its summary
1223 1223 fields.discard('summary')
1224 1224
1225 1225 # We may want to match more than one field
1226 1226 # Not all fields take the same amount of time to be matched
1227 1227 # Sort the selected fields in order of increasing matching cost
1228 1228 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1229 1229 'files', 'description', 'substate', 'diff']
1230 1230 def fieldkeyfunc(f):
1231 1231 try:
1232 1232 return fieldorder.index(f)
1233 1233 except ValueError:
1234 1234 # assume an unknown field is very costly
1235 1235 return len(fieldorder)
1236 1236 fields = list(fields)
1237 1237 fields.sort(key=fieldkeyfunc)
1238 1238
1239 1239 # Each field will be matched with its own "getfield" function
1240 1240 # which will be added to the getfieldfuncs array of functions
1241 1241 getfieldfuncs = []
1242 1242 _funcs = {
1243 1243 'user': lambda r: repo[r].user(),
1244 1244 'branch': lambda r: repo[r].branch(),
1245 1245 'date': lambda r: repo[r].date(),
1246 1246 'description': lambda r: repo[r].description(),
1247 1247 'files': lambda r: repo[r].files(),
1248 1248 'parents': lambda r: repo[r].parents(),
1249 1249 'phase': lambda r: repo[r].phase(),
1250 1250 'substate': lambda r: repo[r].substate,
1251 1251 'summary': lambda r: repo[r].description().splitlines()[0],
1252 1252 'diff': lambda r: list(repo[r].diff(git=True)),
1253 1253 }
1254 1254 for info in fields:
1255 1255 getfield = _funcs.get(info, None)
1256 1256 if getfield is None:
1257 1257 raise error.ParseError(
1258 1258 # i18n: "matching" is a keyword
1259 1259 _("unexpected field name passed to matching: %s") % info)
1260 1260 getfieldfuncs.append(getfield)
1261 1261 # convert the getfield array of functions into a "getinfo" function
1262 1262 # which returns an array of field values (or a single value if there
1263 1263 # is only one field to match)
1264 1264 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1265 1265
1266 1266 matches = set()
1267 1267 for rev in revs:
1268 1268 target = getinfo(rev)
1269 1269 for r in subset:
1270 1270 match = True
1271 1271 for n, f in enumerate(getfieldfuncs):
1272 1272 if target[n] != f(r):
1273 1273 match = False
1274 1274 break
1275 1275 if match:
1276 1276 matches.add(r)
1277 1277 return [r for r in subset if r in matches]
1278 1278
1279 1279 def reverse(repo, subset, x):
1280 1280 """``reverse(set)``
1281 1281 Reverse order of set.
1282 1282 """
1283 1283 l = getset(repo, subset, x)
1284 1284 if not isinstance(l, list):
1285 1285 l = list(l)
1286 1286 l.reverse()
1287 1287 return l
1288 1288
1289 1289 def roots(repo, subset, x):
1290 1290 """``roots(set)``
1291 1291 Changesets in set with no parent changeset in set.
1292 1292 """
1293 1293 s = set(getset(repo, xrange(len(repo)), x))
1294 1294 subset = [r for r in subset if r in s]
1295 1295 cs = _children(repo, subset, s)
1296 1296 return [r for r in subset if r not in cs]
1297 1297
1298 1298 def secret(repo, subset, x):
1299 1299 """``secret()``
1300 1300 Changeset in secret phase."""
1301 1301 # i18n: "secret" is a keyword
1302 1302 getargs(x, 0, 0, _("secret takes no arguments"))
1303 1303 pc = repo._phasecache
1304 1304 return [r for r in subset if pc.phase(repo, r) == phases.secret]
1305 1305
1306 1306 def sort(repo, subset, x):
1307 1307 """``sort(set[, [-]key...])``
1308 1308 Sort set by keys. The default sort order is ascending, specify a key
1309 1309 as ``-key`` to sort in descending order.
1310 1310
1311 1311 The keys can be:
1312 1312
1313 1313 - ``rev`` for the revision number,
1314 1314 - ``branch`` for the branch name,
1315 1315 - ``desc`` for the commit message (description),
1316 1316 - ``user`` for user name (``author`` can be used as an alias),
1317 1317 - ``date`` for the commit date
1318 1318 """
1319 1319 # i18n: "sort" is a keyword
1320 1320 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1321 1321 keys = "rev"
1322 1322 if len(l) == 2:
1323 1323 # i18n: "sort" is a keyword
1324 1324 keys = getstring(l[1], _("sort spec must be a string"))
1325 1325
1326 1326 s = l[0]
1327 1327 keys = keys.split()
1328 1328 l = []
1329 1329 def invert(s):
1330 1330 return "".join(chr(255 - ord(c)) for c in s)
1331 1331 for r in getset(repo, subset, s):
1332 1332 c = repo[r]
1333 1333 e = []
1334 1334 for k in keys:
1335 1335 if k == 'rev':
1336 1336 e.append(r)
1337 1337 elif k == '-rev':
1338 1338 e.append(-r)
1339 1339 elif k == 'branch':
1340 1340 e.append(c.branch())
1341 1341 elif k == '-branch':
1342 1342 e.append(invert(c.branch()))
1343 1343 elif k == 'desc':
1344 1344 e.append(c.description())
1345 1345 elif k == '-desc':
1346 1346 e.append(invert(c.description()))
1347 1347 elif k in 'user author':
1348 1348 e.append(c.user())
1349 1349 elif k in '-user -author':
1350 1350 e.append(invert(c.user()))
1351 1351 elif k == 'date':
1352 1352 e.append(c.date()[0])
1353 1353 elif k == '-date':
1354 1354 e.append(-c.date()[0])
1355 1355 else:
1356 1356 raise error.ParseError(_("unknown sort key %r") % k)
1357 1357 e.append(r)
1358 1358 l.append(e)
1359 1359 l.sort()
1360 1360 return [e[-1] for e in l]
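
# Note on the invert() trick above (illustrative): mapping each byte c to
# chr(255 - ord(c)) reverses lexicographic order -- 'abc' < 'abd' but
# invert('abc') > invert('abd') -- so a plain ascending l.sort() yields a
# descending ordering for string-valued '-' keys.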
1361 1361
1362 1362 def _stringmatcher(pattern):
1363 1363 """
1364 1364 accepts a string, possibly starting with a 're:' or 'literal:' prefix.
1365 1365 returns the matcher name, pattern, and matcher function.
1366 1366 missing or unknown prefixes are treated as literal matches.
1367 1367
1368 1368 helper for tests:
1369 1369 >>> def test(pattern, *tests):
1370 1370 ... kind, pattern, matcher = _stringmatcher(pattern)
1371 1371 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1372 1372
1373 1373 exact matching (no prefix):
1374 1374 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1375 1375 ('literal', 'abcdefg', [False, False, True])
1376 1376
1377 1377 regex matching ('re:' prefix)
1378 1378 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1379 1379 ('re', 'a.+b', [False, False, True])
1380 1380
1381 1381 force exact matches ('literal:' prefix)
1382 1382 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1383 1383 ('literal', 're:foobar', [False, True])
1384 1384
1385 1385 unknown prefixes are ignored and treated as literals
1386 1386 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1387 1387 ('literal', 'foo:bar', [False, False, True])
1388 1388 """
1389 1389 if pattern.startswith('re:'):
1390 1390 pattern = pattern[3:]
1391 1391 try:
1392 1392 regex = re.compile(pattern)
1393 1393 except re.error, e:
1394 1394 raise error.ParseError(_('invalid regular expression: %s')
1395 1395 % e)
1396 1396 return 're', pattern, regex.search
1397 1397 elif pattern.startswith('literal:'):
1398 1398 pattern = pattern[8:]
1399 1399 return 'literal', pattern, pattern.__eq__
1400 1400
1401 1401 def _substringmatcher(pattern):
1402 1402 kind, pattern, matcher = _stringmatcher(pattern)
1403 1403 if kind == 'literal':
1404 1404 matcher = lambda s: pattern in s
1405 1405 return kind, pattern, matcher
1406 1406
1407 1407 def tag(repo, subset, x):
1408 1408 """``tag([name])``
1409 1409 The specified tag by name, or all tagged revisions if no name is given.
1410 1410 """
1411 1411 # i18n: "tag" is a keyword
1412 1412 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1413 1413 cl = repo.changelog
1414 1414 if args:
1415 1415 pattern = getstring(args[0],
1416 1416 # i18n: "tag" is a keyword
1417 1417 _('the argument to tag must be a string'))
1418 1418 kind, pattern, matcher = _stringmatcher(pattern)
1419 1419 if kind == 'literal':
1420 1420 # avoid resolving all tags
1421 1421 tn = repo._tagscache.tags.get(pattern, None)
1422 1422 if tn is None:
1423 1423 raise util.Abort(_("tag '%s' does not exist") % pattern)
1424 1424 s = set([repo[tn].rev()])
1425 1425 else:
1426 1426 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1427 1427 if not s:
1428 1428 raise util.Abort(_("no tags exist that match '%s'") % pattern)
1429 1429 else:
1430 1430 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1431 1431 return [r for r in subset if r in s]
1432 1432
1433 1433 def tagged(repo, subset, x):
1434 1434 return tag(repo, subset, x)
1435 1435
1436 1436 def unstable(repo, subset, x):
1437 1437 """``unstable()``
1438 1438 Non-obsolete changesets with obsolete ancestors.
1439 1439 """
1440 1440 # i18n: "unstable" is a keyword
1441 1441 getargs(x, 0, 0, _("unstable takes no arguments"))
1442 1442 unstables = obsmod.getobscache(repo, 'unstable')
1443 1443 return [r for r in subset if r in unstables]
1444 1444
1445 1445
1446 1446 def user(repo, subset, x):
1447 1447 """``user(string)``
1448 1448 User name contains string. The match is case-insensitive.
1449 1449
1450 1450 If `string` starts with `re:`, the remainder of the string is treated as
1451 1451 a regular expression. To match a user that actually contains `re:`, use
1452 1452 the prefix `literal:`.
1453 1453 """
1454 1454 return author(repo, subset, x)
1455 1455
1456 1456 # for internal use
1457 1457 def _list(repo, subset, x):
1458 1458 s = getstring(x, "internal error")
1459 1459 if not s:
1460 1460 return []
1461 1461 if not isinstance(subset, set):
1462 1462 subset = set(subset)
1463 1463 ls = [repo[r].rev() for r in s.split('\0')]
1464 1464 return [r for r in ls if r in subset]
1465 1465
1466 1466 symbols = {
1467 1467 "adds": adds,
1468 1468 "all": getall,
1469 1469 "ancestor": ancestor,
1470 1470 "ancestors": ancestors,
1471 1471 "_firstancestors": _firstancestors,
1472 1472 "author": author,
1473 1473 "bisect": bisect,
1474 1474 "bisected": bisected,
1475 1475 "bookmark": bookmark,
1476 1476 "branch": branch,
1477 1477 "children": children,
1478 1478 "closed": closed,
1479 1479 "contains": contains,
1480 1480 "converted": converted,
1481 1481 "date": date,
1482 1482 "desc": desc,
1483 1483 "descendants": descendants,
1484 1484 "_firstdescendants": _firstdescendants,
1485 1485 "destination": destination,
1486 1486 "draft": draft,
1487 1487 "extinct": extinct,
1488 1488 "extra": extra,
1489 1489 "file": hasfile,
1490 1490 "filelog": filelog,
1491 1491 "first": first,
1492 1492 "follow": follow,
1493 1493 "_followfirst": _followfirst,
1494 1494 "grep": grep,
1495 1495 "head": head,
1496 1496 "heads": heads,
1497 1497 "hidden": hidden,
1498 1498 "id": node_,
1499 1499 "keyword": keyword,
1500 1500 "last": last,
1501 1501 "limit": limit,
1502 1502 "_matchfiles": _matchfiles,
1503 1503 "max": maxrev,
1504 1504 "merge": merge,
1505 1505 "min": minrev,
1506 1506 "modifies": modifies,
1507 1507 "obsolete": obsolete,
1508 1508 "origin": origin,
1509 1509 "outgoing": outgoing,
1510 1510 "p1": p1,
1511 1511 "p2": p2,
1512 1512 "parents": parents,
1513 1513 "present": present,
1514 1514 "public": public,
1515 1515 "remote": remote,
1516 1516 "removes": removes,
1517 1517 "rev": rev,
1518 1518 "reverse": reverse,
1519 1519 "roots": roots,
1520 1520 "sort": sort,
1521 1521 "secret": secret,
1522 1522 "matching": matching,
1523 1523 "tag": tag,
1524 1524 "tagged": tagged,
1525 1525 "user": user,
1526 1526 "unstable": unstable,
1527 1527 "_list": _list,
1528 1528 }
1529 1529
1530 1530 methods = {
1531 1531 "range": rangeset,
1532 1532 "dagrange": dagrange,
1533 1533 "string": stringset,
1534 1534 "symbol": symbolset,
1535 1535 "and": andset,
1536 1536 "or": orset,
1537 1537 "not": notset,
1538 1538 "list": listset,
1539 1539 "func": func,
1540 1540 "ancestor": ancestorspec,
1541 1541 "parent": parentspec,
1542 1542 "parentpost": p1,
1543 1543 }
1544 1544
1545 1545 def optimize(x, small):
1546 1546 if x is None:
1547 1547 return 0, x
1548 1548
1549 1549 smallbonus = 1
1550 1550 if small:
1551 1551 smallbonus = .5
1552 1552
1553 1553 op = x[0]
1554 1554 if op == 'minus':
1555 1555 return optimize(('and', x[1], ('not', x[2])), small)
1556 1556 elif op == 'dagrangepre':
1557 1557 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1558 1558 elif op == 'dagrangepost':
1559 1559 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1560 1560 elif op == 'rangepre':
1561 1561 return optimize(('range', ('string', '0'), x[1]), small)
1562 1562 elif op == 'rangepost':
1563 1563 return optimize(('range', x[1], ('string', 'tip')), small)
1564 1564 elif op == 'negate':
1565 1565 return optimize(('string',
1566 1566 '-' + getstring(x[1], _("can't negate that"))), small)
1567 1567 elif op in 'string symbol negate':
1568 1568 return smallbonus, x # single revisions are small
1569 1569 elif op == 'and':
1570 1570 wa, ta = optimize(x[1], True)
1571 1571 wb, tb = optimize(x[2], True)
1572 1572 w = min(wa, wb)
1573 1573 if wa > wb:
1574 1574 return w, (op, tb, ta)
1575 1575 return w, (op, ta, tb)
1576 1576 elif op == 'or':
1577 1577 wa, ta = optimize(x[1], False)
1578 1578 wb, tb = optimize(x[2], False)
1579 1579 if wb < wa:
1580 1580 wb, wa = wa, wb
1581 1581 return max(wa, wb), (op, ta, tb)
1582 1582 elif op == 'not':
1583 1583 o = optimize(x[1], not small)
1584 1584 return o[0], (op, o[1])
1585 1585 elif op == 'parentpost':
1586 1586 o = optimize(x[1], small)
1587 1587 return o[0], (op, o[1])
1588 1588 elif op == 'group':
1589 1589 return optimize(x[1], small)
1590 1590 elif op in 'dagrange range list parent ancestorspec':
1591 1591 if op == 'parent':
1592 1592 # x^:y means (x^) : y, not x ^ (:y)
1593 1593 post = ('parentpost', x[1])
1594 1594 if x[2][0] == 'dagrangepre':
1595 1595 return optimize(('dagrange', post, x[2][1]), small)
1596 1596 elif x[2][0] == 'rangepre':
1597 1597 return optimize(('range', post, x[2][1]), small)
1598 1598
1599 1599 wa, ta = optimize(x[1], small)
1600 1600 wb, tb = optimize(x[2], small)
1601 1601 return wa + wb, (op, ta, tb)
1602 1602 elif op == 'func':
1603 1603 f = getstring(x[1], _("not a symbol"))
1604 1604 wa, ta = optimize(x[2], small)
1605 1605 if f in ("author branch closed date desc file grep keyword "
1606 1606 "outgoing user"):
1607 1607 w = 10 # slow
1608 1608 elif f in "modifies adds removes":
1609 1609 w = 30 # slower
1610 1610 elif f == "contains":
1611 1611 w = 100 # very slow
1612 1612 elif f == "ancestor":
1613 1613 w = 1 * smallbonus
1614 1614 elif f in "reverse limit first":
1615 1615 w = 0
1616 1616 elif f in "sort":
1617 1617 w = 10 # assume most sorts look at changelog
1618 1618 else:
1619 1619 w = 1
1620 1620 return w + wa, (op, x[1], ta)
1621 1621 return 1, x
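
# Illustrative sketch of the 'and' reordering above: in
#     contains("x") and branch(default)
# contains() weighs in around 100 and branch() around 10, so the operands
# are swapped and the cheap predicate runs first, handing the expensive
# one a smaller subset.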
1622 1622
1623 1623 _aliasarg = ('func', ('symbol', '_aliasarg'))
1624 1624 def _getaliasarg(tree):
1625 1625 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1626 1626 return X; otherwise return None.
1627 1627 """
1628 1628 if (len(tree) == 3 and tree[:2] == _aliasarg
1629 1629 and tree[2][0] == 'string'):
1630 1630 return tree[2][1]
1631 1631 return None
1632 1632
1633 1633 def _checkaliasarg(tree, known=None):
1634 1634 """Check that tree contains no _aliasarg construct, or only ones whose
1635 1635 value is in known. Used to avoid alias placeholder injection.
1636 1636 """
1637 1637 if isinstance(tree, tuple):
1638 1638 arg = _getaliasarg(tree)
1639 1639 if arg is not None and (not known or arg not in known):
1640 1640 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1641 1641 for t in tree:
1642 1642 _checkaliasarg(t, known)
1643 1643
1644 1644 class revsetalias(object):
1645 1645 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1646 1646 args = None
1647 1647
1648 1648 def __init__(self, name, value):
1649 1649 '''Aliases like:
1650 1650
1651 1651 h = heads(default)
1652 1652 b($1) = ancestors($1) - ancestors(default)
1653 1653 '''
1654 1654 m = self.funcre.search(name)
1655 1655 if m:
1656 1656 self.name = m.group(1)
1657 1657 self.tree = ('func', ('symbol', m.group(1)))
1658 1658 self.args = [x.strip() for x in m.group(2).split(',')]
1659 1659 for arg in self.args:
1660 1660 # _aliasarg() is an unknown symbol only used to separate
1661 1661 # alias argument placeholders from regular strings.
1662 1662 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1663 1663 else:
1664 1664 self.name = name
1665 1665 self.tree = ('symbol', name)
1666 1666
1667 1667 self.replacement, pos = parse(value)
1668 1668 if pos != len(value):
1669 1669 raise error.ParseError(_('invalid token'), pos)
1670 1670 # Check for placeholder injection
1671 1671 _checkaliasarg(self.replacement, self.args)
1672 1672
1673 1673 def _getalias(aliases, tree):
1674 1674 """If tree looks like an unexpanded alias, return it. Return None
1675 1675 otherwise.
1676 1676 """
1677 1677 if isinstance(tree, tuple) and tree:
1678 1678 if tree[0] == 'symbol' and len(tree) == 2:
1679 1679 name = tree[1]
1680 1680 alias = aliases.get(name)
1681 1681 if alias and alias.args is None and alias.tree == tree:
1682 1682 return alias
1683 1683 if tree[0] == 'func' and len(tree) > 1:
1684 1684 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1685 1685 name = tree[1][1]
1686 1686 alias = aliases.get(name)
1687 1687 if alias and alias.args is not None and alias.tree == tree[:2]:
1688 1688 return alias
1689 1689 return None
1690 1690
1691 1691 def _expandargs(tree, args):
1692 1692 """Replace _aliasarg instances with the substitution value of the
1693 1693 same name in args, recursively.
1694 1694 """
1695 1695 if not tree or not isinstance(tree, tuple):
1696 1696 return tree
1697 1697 arg = _getaliasarg(tree)
1698 1698 if arg is not None:
1699 1699 return args[arg]
1700 1700 return tuple(_expandargs(t, args) for t in tree)
1701 1701
1702 1702 def _expandaliases(aliases, tree, expanding, cache):
1703 1703 """Expand aliases in tree, recursively.
1704 1704
1705 1705 'aliases' is a dictionary mapping user defined aliases to
1706 1706 revsetalias objects.
1707 1707 """
1708 1708 if not isinstance(tree, tuple):
1709 1709 # Do not expand raw strings
1710 1710 return tree
1711 1711 alias = _getalias(aliases, tree)
1712 1712 if alias is not None:
1713 1713 if alias in expanding:
1714 1714 raise error.ParseError(_('infinite expansion of revset alias "%s" '
1715 1715 'detected') % alias.name)
1716 1716 expanding.append(alias)
1717 1717 if alias.name not in cache:
1718 1718 cache[alias.name] = _expandaliases(aliases, alias.replacement,
1719 1719 expanding, cache)
1720 1720 result = cache[alias.name]
1721 1721 expanding.pop()
1722 1722 if alias.args is not None:
1723 1723 l = getlist(tree[2])
1724 1724 if len(l) != len(alias.args):
1725 1725 raise error.ParseError(
1726 1726 _('invalid number of arguments: %s') % len(l))
1727 1727 l = [_expandaliases(aliases, a, [], cache) for a in l]
1728 1728 result = _expandargs(result, dict(zip(alias.args, l)))
1729 1729 else:
1730 1730 result = tuple(_expandaliases(aliases, t, expanding, cache)
1731 1731 for t in tree)
1732 1732 return result
1733 1733
1734 1734 def findaliases(ui, tree):
1735 1735 _checkaliasarg(tree)
1736 1736 aliases = {}
1737 1737 for k, v in ui.configitems('revsetalias'):
1738 1738 alias = revsetalias(k, v)
1739 1739 aliases[alias.name] = alias
1740 1740 return _expandaliases(aliases, tree, [], {})
1741 1741
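findaliases pulls its definitions from the [revsetalias] section of the configuration; a hypothetical hgrc:

    [revsetalias]
    h = heads(default)
    b($1) = ancestors($1) - ancestors(default)

Each entry becomes a revsetalias, and expansion is recursive, with the expanding stack guarding against self-referential definitions.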
1742 1742 parse = parser.parser(tokenize, elements).parse
1743 1743
1744 1744 def match(ui, spec):
1745 1745 if not spec:
1746 1746 raise error.ParseError(_("empty query"))
1747 1747 tree, pos = parse(spec)
1748 1748 if (pos != len(spec)):
1749 1749 raise error.ParseError(_("invalid token"), pos)
1750 1750 if ui:
1751 1751 tree = findaliases(ui, tree)
1752 1752 weight, tree = optimize(tree, True)
1753 1753 def mfunc(repo, subset):
1754 1754 return getset(repo, subset, tree)
1755 1755 return mfunc
1756 1756
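The returned mfunc simply filters a subset of revisions; a sketch of typical use, assuming repo is an existing localrepo object:

    mfunc = match(repo.ui, 'heads(default)')
    revs = mfunc(repo, list(repo))  # evaluate against every revision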
1757 1757 def formatspec(expr, *args):
1758 1758 '''
1759 1759 This is a convenience function for using revsets internally, and
1760 1760 escapes arguments appropriately. Aliases are intentionally ignored
1761 1761 so that intended expression behavior isn't accidentally subverted.
1762 1762
1763 1763 Supported arguments:
1764 1764
1765 1765 %r = revset expression, parenthesized
1766 1766 %d = int(arg), no quoting
1767 1767 %s = string(arg), escaped and single-quoted
1768 1768 %b = arg.branch(), escaped and single-quoted
1769 1769 %n = hex(arg), single-quoted
1770 1770 %% = a literal '%'
1771 1771
1772 1772 Prefixing the type with 'l' specifies a parenthesized list of that type.
1773 1773
1774 1774 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
1775 1775 '(10 or 11):: and ((this()) or (that()))'
1776 1776 >>> formatspec('%d:: and not %d::', 10, 20)
1777 1777 '10:: and not 20::'
1778 1778 >>> formatspec('%ld or %ld', [], [1])
1779 1779 "_list('') or 1"
1780 1780 >>> formatspec('keyword(%s)', 'foo\\xe9')
1781 1781 "keyword('foo\\\\xe9')"
1782 1782 >>> b = lambda: 'default'
1783 1783 >>> b.branch = b
1784 1784 >>> formatspec('branch(%b)', b)
1785 1785 "branch('default')"
1786 1786 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
1787 1787 "root(_list('a\\x00b\\x00c\\x00d'))"
1788 1788 '''
1789 1789
1790 1790 def quote(s):
1791 1791 return repr(str(s))
1792 1792
1793 1793 def argtype(c, arg):
1794 1794 if c == 'd':
1795 1795 return str(int(arg))
1796 1796 elif c == 's':
1797 1797 return quote(arg)
1798 1798 elif c == 'r':
1799 1799 parse(arg) # make sure syntax errors are confined
1800 1800 return '(%s)' % arg
1801 1801 elif c == 'n':
1802 1802 return quote(node.hex(arg))
1803 1803 elif c == 'b':
1804 1804 return quote(arg.branch())
1805 1805
1806 1806 def listexp(s, t):
1807 1807 l = len(s)
1808 1808 if l == 0:
1809 1809 return "_list('')"
1810 1810 elif l == 1:
1811 1811 return argtype(t, s[0])
1812 1812 elif t == 'd':
1813 1813 return "_list('%s')" % "\0".join(str(int(a)) for a in s)
1814 1814 elif t == 's':
1815 1815 return "_list('%s')" % "\0".join(s)
1816 1816 elif t == 'n':
1817 1817 return "_list('%s')" % "\0".join(node.hex(a) for a in s)
1818 1818 elif t == 'b':
1819 1819 return "_list('%s')" % "\0".join(a.branch() for a in s)
1820 1820
1821 1821 m = l // 2
1822 1822 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
1823 1823
1824 1824 ret = ''
1825 1825 pos = 0
1826 1826 arg = 0
1827 1827 while pos < len(expr):
1828 1828 c = expr[pos]
1829 1829 if c == '%':
1830 1830 pos += 1
1831 1831 d = expr[pos]
1832 1832 if d == '%':
1833 1833 ret += d
1834 1834 elif d in 'dsnbr':
1835 1835 ret += argtype(d, args[arg])
1836 1836 arg += 1
1837 1837 elif d == 'l':
1838 1838 # a list of some type
1839 1839 pos += 1
1840 1840 d = expr[pos]
1841 1841 ret += listexp(list(args[arg]), d)
1842 1842 arg += 1
1843 1843 else:
1844 1844 raise util.Abort('unexpected revspec format character %s' % d)
1845 1845 else:
1846 1846 ret += c
1847 1847 pos += 1
1848 1848
1849 1849 return ret
1850 1850
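In practice formatspec pairs with match() above to build queries from untrusted values; a hedged sketch in which word and repo are placeholders:

    spec = formatspec('keyword(%s)', word)      # word is quoted, never parsed
    revs = match(None, spec)(repo, list(repo))  # ui=None also skips aliases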
1851 1851 def prettyformat(tree):
1852 1852 def _prettyformat(tree, level, lines):
1853 1853 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
1854 1854 lines.append((level, str(tree)))
1855 1855 else:
1856 1856 lines.append((level, '(%s' % tree[0]))
1857 1857 for s in tree[1:]:
1858 1858 _prettyformat(s, level + 1, lines)
1859 1859 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
1860 1860
1861 1861 lines = []
1862 1862 _prettyformat(tree, 0, lines)
1863 1863 output = '\n'.join((' '*l + s) for l, s in lines)
1864 1864 return output
1865 1865
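For example, the parse tree of a simple function call renders one node per line, indented one space per nesting level:

    >>> print prettyformat(parse('heads(default)')[0])
    (func
     ('symbol', 'heads')
     ('symbol', 'default'))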
1866 1866 # tell hggettext to extract docstrings from these functions:
1867 1867 i18nfunctions = symbols.values()
@@ -1,933 +1,933
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, error, osutil, revset, similar, encoding, phases
10 10 import match as matchmod
11 11 import os, errno, re, stat, sys, glob
12 12
13 13 def nochangesfound(ui, repo, excluded=None):
14 14 '''Report no changes for push/pull; excluded is None or a list of
15 15 nodes excluded from the push/pull.
16 16 '''
17 17 secretlist = []
18 18 if excluded:
19 19 for n in excluded:
20 20 ctx = repo[n]
21 21 if ctx.phase() >= phases.secret and not ctx.extinct():
22 22 secretlist.append(n)
23 23
24 24 if secretlist:
25 25 ui.status(_("no changes found (ignored %d secret changesets)\n")
26 26 % len(secretlist))
27 27 else:
28 28 ui.status(_("no changes found\n"))
29 29
30 30 def checkfilename(f):
31 31 '''Check that the filename f is an acceptable filename for a tracked file'''
32 32 if '\r' in f or '\n' in f:
33 33 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
34 34
35 35 def checkportable(ui, f):
36 36 '''Check if filename f is portable and warn or abort depending on config'''
37 37 checkfilename(f)
38 38 abort, warn = checkportabilityalert(ui)
39 39 if abort or warn:
40 40 msg = util.checkwinfilename(f)
41 41 if msg:
42 42 msg = "%s: %r" % (msg, f)
43 43 if abort:
44 44 raise util.Abort(msg)
45 45 ui.warn(_("warning: %s\n") % msg)
46 46
47 47 def checkportabilityalert(ui):
48 48 '''check if the user's config requests nothing, a warning, or abort for
49 49 non-portable filenames'''
50 50 val = ui.config('ui', 'portablefilenames', 'warn')
51 51 lval = val.lower()
52 52 bval = util.parsebool(val)
53 53 abort = os.name == 'nt' or lval == 'abort'
54 54 warn = bval or lval == 'warn'
55 55 if bval is None and not (warn or abort or lval == 'ignore'):
56 56 raise error.ConfigError(
57 57 _("ui.portablefilenames value is invalid ('%s')") % val)
58 58 return abort, warn
59 59
60 60 class casecollisionauditor(object):
61 61 def __init__(self, ui, abort, dirstate):
62 62 self._ui = ui
63 63 self._abort = abort
64 64 allfiles = '\0'.join(dirstate._map)
65 65 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
66 66 self._dirstate = dirstate
67 67 # The purpose of _newfiles is so that we don't complain about
68 68 # case collisions if someone were to call this object with the
69 69 # same filename twice.
70 70 self._newfiles = set()
71 71
72 72 def __call__(self, f):
73 73 fl = encoding.lower(f)
74 74 if (fl in self._loweredfiles and f not in self._dirstate and
75 75 f not in self._newfiles):
76 76 msg = _('possible case-folding collision for %s') % f
77 77 if self._abort:
78 78 raise util.Abort(msg)
79 79 self._ui.warn(_("warning: %s\n") % msg)
80 80 self._loweredfiles.add(fl)
81 81 self._newfiles.add(f)
82 82
83 83 class pathauditor(object):
84 84 '''ensure that a filesystem path contains no banned components.
85 85 the following properties of a path are checked:
86 86
87 87 - ends with a directory separator
88 88 - under top-level .hg
89 89 - starts at the root of a windows drive
90 90 - contains ".."
91 91 - traverses a symlink (e.g. a/symlink_here/b)
92 92 - inside a nested repository (a callback can be used to approve
93 93 some nested repositories, e.g., subrepositories)
94 94 '''
95 95
96 96 def __init__(self, root, callback=None):
97 97 self.audited = set()
98 98 self.auditeddir = set()
99 99 self.root = root
100 100 self.callback = callback
101 101 if os.path.lexists(root) and not util.checkcase(root):
102 102 self.normcase = util.normcase
103 103 else:
104 104 self.normcase = lambda x: x
105 105
106 106 def __call__(self, path):
107 107 '''Check the relative path.
108 108 path may contain a pattern (e.g. foodir/**.txt)'''
109 109
110 110 path = util.localpath(path)
111 111 normpath = self.normcase(path)
112 112 if normpath in self.audited:
113 113 return
114 114 # AIX ignores "/" at end of path, others raise EISDIR.
115 115 if util.endswithsep(path):
116 116 raise util.Abort(_("path ends in directory separator: %s") % path)
117 117 parts = util.splitpath(path)
118 118 if (os.path.splitdrive(path)[0]
119 119 or parts[0].lower() in ('.hg', '.hg.', '')
120 120 or os.pardir in parts):
121 121 raise util.Abort(_("path contains illegal component: %s") % path)
122 122 if '.hg' in path.lower():
123 123 lparts = [p.lower() for p in parts]
124 124 for p in '.hg', '.hg.':
125 125 if p in lparts[1:]:
126 126 pos = lparts.index(p)
127 127 base = os.path.join(*parts[:pos])
128 128 raise util.Abort(_("path '%s' is inside nested repo %r")
129 129 % (path, base))
130 130
131 131 normparts = util.splitpath(normpath)
132 132 assert len(parts) == len(normparts)
133 133
134 134 parts.pop()
135 135 normparts.pop()
136 136 prefixes = []
137 137 while parts:
138 138 prefix = os.sep.join(parts)
139 139 normprefix = os.sep.join(normparts)
140 140 if normprefix in self.auditeddir:
141 141 break
142 142 curpath = os.path.join(self.root, prefix)
143 143 try:
144 144 st = os.lstat(curpath)
145 145 except OSError, err:
146 146 # EINVAL can be raised as invalid path syntax under win32.
147 147 # They must be ignored so that patterns can be checked too.
148 148 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
149 149 raise
150 150 else:
151 151 if stat.S_ISLNK(st.st_mode):
152 152 raise util.Abort(
153 153 _('path %r traverses symbolic link %r')
154 154 % (path, prefix))
155 155 elif (stat.S_ISDIR(st.st_mode) and
156 156 os.path.isdir(os.path.join(curpath, '.hg'))):
157 157 if not self.callback or not self.callback(curpath):
158 158 raise util.Abort(_("path '%s' is inside nested "
159 159 "repo %r")
160 160 % (path, prefix))
161 161 prefixes.append(normprefix)
162 162 parts.pop()
163 163 normparts.pop()
164 164
165 165 self.audited.add(normpath)
166 166 # only add prefixes to the cache after checking everything: we don't
167 167 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
168 168 self.auditeddir.update(prefixes)
169 169
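A sketch of the auditor in use (the paths are hypothetical):

    audit = pathauditor('/path/to/repo')
    audit('src/module.py')  # ok; the result is cached in audit.audited
    audit('../escape.txt')  # raises util.Abort: illegal component
    audit('.hg/hgrc')       # raises util.Abort: under top-level .hg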
170 170 class abstractvfs(object):
171 171 """Abstract base class; cannot be instantiated"""
172 172
173 173 def __init__(self, *args, **kwargs):
174 174 '''Prevent instantiation; don't call this from subclasses.'''
175 175 raise NotImplementedError('attempted instantiating ' + str(type(self)))
176 176
177 177 def tryread(self, path):
178 178 '''gracefully return an empty string for missing files'''
179 179 try:
180 180 return self.read(path)
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184 return ""
185 185
186 186 def read(self, path):
187 187 fp = self(path, 'rb')
188 188 try:
189 189 return fp.read()
190 190 finally:
191 191 fp.close()
192 192
193 193 def write(self, path, data):
194 194 fp = self(path, 'wb')
195 195 try:
196 196 return fp.write(data)
197 197 finally:
198 198 fp.close()
199 199
200 200 def append(self, path, data):
201 201 fp = self(path, 'ab')
202 202 try:
203 203 return fp.write(data)
204 204 finally:
205 205 fp.close()
206 206
207 207 def mkdir(self, path=None):
208 208 return os.mkdir(self.join(path))
209 209
210 210 def exists(self, path=None):
211 211 return os.path.exists(self.join(path))
212 212
213 213 def isdir(self, path=None):
214 214 return os.path.isdir(self.join(path))
215 215
216 216 def makedir(self, path=None, notindexed=True):
217 217 return util.makedir(self.join(path), notindexed)
218 218
219 219 def makedirs(self, path=None, mode=None):
220 220 return util.makedirs(self.join(path), mode)
221 221
222 222 class vfs(abstractvfs):
223 223 '''Operate files relative to a base directory
224 224
225 225 This class is used to hide the details of COW semantics and
226 226 remote file access from higher level code.
227 227 '''
228 228 def __init__(self, base, audit=True, expand=False):
229 229 if expand:
230 230 base = os.path.realpath(util.expandpath(base))
231 231 self.base = base
232 232 self.basesep = self.base + os.sep
233 233 self._setmustaudit(audit)
234 234 self.createmode = None
235 235 self._trustnlink = None
236 236
237 237 def _getmustaudit(self):
238 238 return self._audit
239 239
240 240 def _setmustaudit(self, onoff):
241 241 self._audit = onoff
242 242 if onoff:
243 243 self.auditor = pathauditor(self.base)
244 244 else:
245 245 self.auditor = util.always
246 246
247 247 mustaudit = property(_getmustaudit, _setmustaudit)
248 248
249 249 @util.propertycache
250 250 def _cansymlink(self):
251 251 return util.checklink(self.base)
252 252
253 253 def _fixfilemode(self, name):
254 254 if self.createmode is None:
255 255 return
256 256 os.chmod(name, self.createmode & 0666)
257 257
258 258 def __call__(self, path, mode="r", text=False, atomictemp=False):
259 259 if self._audit:
260 260 r = util.checkosfilename(path)
261 261 if r:
262 262 raise util.Abort("%s: %r" % (r, path))
263 263 self.auditor(path)
264 264 f = self.join(path)
265 265
266 266 if not text and "b" not in mode:
267 267 mode += "b" # for that other OS
268 268
269 269 nlink = -1
270 270 dirname, basename = util.split(f)
271 271 # If basename is empty, then the path is malformed because it points
272 272 # to a directory. Let the posixfile() call below raise IOError.
273 273 if basename and mode not in ('r', 'rb'):
274 274 if atomictemp:
275 275 if not os.path.isdir(dirname):
276 276 util.makedirs(dirname, self.createmode)
277 277 return util.atomictempfile(f, mode, self.createmode)
278 278 try:
279 279 if 'w' in mode:
280 280 util.unlink(f)
281 281 nlink = 0
282 282 else:
283 283 # nlinks() may behave differently for files on Windows
284 284 # shares if the file is open.
285 285 fd = util.posixfile(f)
286 286 nlink = util.nlinks(f)
287 287 if nlink < 1:
288 288 nlink = 2 # force mktempcopy (issue1922)
289 289 fd.close()
290 290 except (OSError, IOError), e:
291 291 if e.errno != errno.ENOENT:
292 292 raise
293 293 nlink = 0
294 294 if not os.path.isdir(dirname):
295 295 util.makedirs(dirname, self.createmode)
296 296 if nlink > 0:
297 297 if self._trustnlink is None:
298 298 self._trustnlink = nlink > 1 or util.checknlink(f)
299 299 if nlink > 1 or not self._trustnlink:
300 300 util.rename(util.mktempcopy(f), f)
301 301 fp = util.posixfile(f, mode)
302 302 if nlink == 0:
303 303 self._fixfilemode(f)
304 304 return fp
305 305
306 306 def symlink(self, src, dst):
307 307 self.auditor(dst)
308 308 linkname = self.join(dst)
309 309 try:
310 310 os.unlink(linkname)
311 311 except OSError:
312 312 pass
313 313
314 314 dirname = os.path.dirname(linkname)
315 315 if not os.path.exists(dirname):
316 316 util.makedirs(dirname, self.createmode)
317 317
318 318 if self._cansymlink:
319 319 try:
320 320 os.symlink(src, linkname)
321 321 except OSError, err:
322 322 raise OSError(err.errno, _('could not symlink to %r: %s') %
323 323 (src, err.strerror), linkname)
324 324 else:
325 325 f = self(dst, "w")
326 326 f.write(src)
327 327 f.close()
328 328 self._fixfilemode(dst)
329 329
330 330 def audit(self, path):
331 331 self.auditor(path)
332 332
333 333 def join(self, path):
334 334 if path:
335 335 return path.startswith('/') and path or (self.basesep + path)
336 336 return self.base
337 337
338 338 opener = vfs
339 339
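A minimal sketch of the vfs in use (the base directory is hypothetical):

    v = vfs('/path/to/repo/.hg')
    v.write('foo', 'bar\n')       # audited, opened in binary mode
    data = v.read('foo')          # 'bar\n'
    empty = v.tryread('missing')  # '' instead of an IOError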
340 340 class filtervfs(abstractvfs):
341 341 '''Wrapper vfs for filtering filenames with a function.'''
342 342
343 343 def __init__(self, opener, filter):
344 344 self._filter = filter
345 345 self._orig = opener
346 346
347 347 def __call__(self, path, *args, **kwargs):
348 348 return self._orig(self._filter(path), *args, **kwargs)
349 349
350 350 filteropener = filtervfs
351 351
352 352 def canonpath(root, cwd, myname, auditor=None):
353 353 '''return the canonical path of myname, given cwd and root'''
354 354 if util.endswithsep(root):
355 355 rootsep = root
356 356 else:
357 357 rootsep = root + os.sep
358 358 name = myname
359 359 if not os.path.isabs(name):
360 360 name = os.path.join(root, cwd, name)
361 361 name = os.path.normpath(name)
362 362 if auditor is None:
363 363 auditor = pathauditor(root)
364 364 if name != rootsep and name.startswith(rootsep):
365 365 name = name[len(rootsep):]
366 366 auditor(name)
367 367 return util.pconvert(name)
368 368 elif name == root:
369 369 return ''
370 370 else:
371 371 # Determine whether `name' is in the hierarchy at or beneath `root',
372 372 # by iterating name=dirname(name) until that causes no change (can't
373 373 # check name == '/', because that doesn't work on windows). The list
374 374 # `rel' holds the reversed list of components making up the relative
375 375 # file name we want.
376 376 rel = []
377 377 while True:
378 378 try:
379 379 s = util.samefile(name, root)
380 380 except OSError:
381 381 s = False
382 382 if s:
383 383 if not rel:
384 384 # name was actually the same as root (maybe a symlink)
385 385 return ''
386 386 rel.reverse()
387 387 name = os.path.join(*rel)
388 388 auditor(name)
389 389 return util.pconvert(name)
390 390 dirname, basename = util.split(name)
391 391 rel.append(basename)
392 392 if dirname == name:
393 393 break
394 394 name = dirname
395 395
396 396 raise util.Abort('%s not under root' % myname)
397 397
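For instance, with a repository rooted at a hypothetical /repo:

    canonpath('/repo', 'sub', 'file.txt')   # -> 'sub/file.txt'
    canonpath('/repo', '', '/repo/a/b')     # -> 'a/b'
    canonpath('/repo', '', '/elsewhere/x')  # aborts: not under root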
398 398 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
399 399 '''yield every hg repository under path, always recursively.
400 400 The recurse flag will only control recursion into repo working dirs'''
401 401 def errhandler(err):
402 402 if err.filename == path:
403 403 raise err
404 404 samestat = getattr(os.path, 'samestat', None)
405 405 if followsym and samestat is not None:
406 406 def adddir(dirlst, dirname):
407 407 match = False
408 408 dirstat = os.stat(dirname)
409 409 for lstdirstat in dirlst:
410 410 if samestat(dirstat, lstdirstat):
411 411 match = True
412 412 break
413 413 if not match:
414 414 dirlst.append(dirstat)
415 415 return not match
416 416 else:
417 417 followsym = False
418 418
419 419 if (seen_dirs is None) and followsym:
420 420 seen_dirs = []
421 421 adddir(seen_dirs, path)
422 422 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
423 423 dirs.sort()
424 424 if '.hg' in dirs:
425 425 yield root # found a repository
426 426 qroot = os.path.join(root, '.hg', 'patches')
427 427 if os.path.isdir(os.path.join(qroot, '.hg')):
428 428 yield qroot # we have a patch queue repo here
429 429 if recurse:
430 430 # avoid recursing inside the .hg directory
431 431 dirs.remove('.hg')
432 432 else:
433 433 dirs[:] = [] # don't descend further
434 434 elif followsym:
435 435 newdirs = []
436 436 for d in dirs:
437 437 fname = os.path.join(root, d)
438 438 if adddir(seen_dirs, fname):
439 439 if os.path.islink(fname):
440 440 for hgname in walkrepos(fname, True, seen_dirs):
441 441 yield hgname
442 442 else:
443 443 newdirs.append(d)
444 444 dirs[:] = newdirs
445 445
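A sketch of scanning a directory tree for repositories (the path is hypothetical):

    for root in walkrepos('/home/user/src', followsym=True):
        print root  # each repo found, including any .hg/patches queue repos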
446 446 def osrcpath():
447 447 '''return default os-specific hgrc search path'''
448 448 path = systemrcpath()
449 449 path.extend(userrcpath())
450 450 path = [os.path.normpath(f) for f in path]
451 451 return path
452 452
453 453 _rcpath = None
454 454
455 455 def rcpath():
456 456 '''return hgrc search path. if env var HGRCPATH is set, use it.
457 457 for each item in path, if directory, use files ending in .rc,
458 458 else use item.
459 459 make HGRCPATH empty to only look in .hg/hgrc of current repo.
460 460 if no HGRCPATH, use default os-specific path.'''
461 461 global _rcpath
462 462 if _rcpath is None:
463 463 if 'HGRCPATH' in os.environ:
464 464 _rcpath = []
465 465 for p in os.environ['HGRCPATH'].split(os.pathsep):
466 466 if not p:
467 467 continue
468 468 p = util.expandpath(p)
469 469 if os.path.isdir(p):
470 470 for f, kind in osutil.listdir(p):
471 471 if f.endswith('.rc'):
472 472 _rcpath.append(os.path.join(p, f))
473 473 else:
474 474 _rcpath.append(p)
475 475 else:
476 476 _rcpath = osrcpath()
477 477 return _rcpath
478 478
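The HGRCPATH handling above can be summarized with two illustrative settings:

    # HGRCPATH=''                      -> consult only the repo's .hg/hgrc
    # HGRCPATH='/etc/one.rc:~/conf.d'  -> one.rc plus every *.rc in ~/conf.d

Note that the result is cached in the module-level _rcpath, so the variable is read once per process.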
479 479 if os.name != 'nt':
480 480
481 481 def rcfiles(path):
482 482 rcs = [os.path.join(path, 'hgrc')]
483 483 rcdir = os.path.join(path, 'hgrc.d')
484 484 try:
485 485 rcs.extend([os.path.join(rcdir, f)
486 486 for f, kind in osutil.listdir(rcdir)
487 487 if f.endswith(".rc")])
488 488 except OSError:
489 489 pass
490 490 return rcs
491 491
492 492 def systemrcpath():
493 493 path = []
494 494 if sys.platform == 'plan9':
495 495 root = 'lib/mercurial'
496 496 else:
497 497 root = 'etc/mercurial'
498 498 # old mod_python does not set sys.argv
499 499 if len(getattr(sys, 'argv', [])) > 0:
500 500 p = os.path.dirname(os.path.dirname(sys.argv[0]))
501 501 path.extend(rcfiles(os.path.join(p, root)))
502 502 path.extend(rcfiles('/' + root))
503 503 return path
504 504
505 505 def userrcpath():
506 506 if sys.platform == 'plan9':
507 507 return [os.environ['home'] + '/lib/hgrc']
508 508 else:
509 509 return [os.path.expanduser('~/.hgrc')]
510 510
511 511 else:
512 512
513 513 import _winreg
514 514
515 515 def systemrcpath():
516 516 '''return default os-specific hgrc search path'''
517 517 rcpath = []
518 518 filename = util.executablepath()
519 519 # Use mercurial.ini found in directory with hg.exe
520 520 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
521 521 if os.path.isfile(progrc):
522 522 rcpath.append(progrc)
523 523 return rcpath
524 524 # Use hgrc.d found in directory with hg.exe
525 525 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
526 526 if os.path.isdir(progrcd):
527 527 for f, kind in osutil.listdir(progrcd):
528 528 if f.endswith('.rc'):
529 529 rcpath.append(os.path.join(progrcd, f))
530 530 return rcpath
531 531 # else look for a system rcpath in the registry
532 532 value = util.lookupreg('SOFTWARE\\Mercurial', None,
533 533 _winreg.HKEY_LOCAL_MACHINE)
534 534 if not isinstance(value, str) or not value:
535 535 return rcpath
536 536 value = util.localpath(value)
537 537 for p in value.split(os.pathsep):
538 538 if p.lower().endswith('mercurial.ini'):
539 539 rcpath.append(p)
540 540 elif os.path.isdir(p):
541 541 for f, kind in osutil.listdir(p):
542 542 if f.endswith('.rc'):
543 543 rcpath.append(os.path.join(p, f))
544 544 return rcpath
545 545
546 546 def userrcpath():
547 547 '''return os-specific hgrc search path to the user dir'''
548 548 home = os.path.expanduser('~')
549 549 path = [os.path.join(home, 'mercurial.ini'),
550 550 os.path.join(home, '.hgrc')]
551 551 userprofile = os.environ.get('USERPROFILE')
552 552 if userprofile:
553 553 path.append(os.path.join(userprofile, 'mercurial.ini'))
554 554 path.append(os.path.join(userprofile, '.hgrc'))
555 555 return path
556 556
557 557 def revsingle(repo, revspec, default='.'):
558 558 if not revspec:
559 559 return repo[default]
560 560
561 561 l = revrange(repo, [revspec])
562 562 if len(l) < 1:
563 563 raise util.Abort(_('empty revision set'))
564 564 return repo[l[-1]]
565 565
566 566 def revpair(repo, revs):
567 567 if not revs:
568 568 return repo.dirstate.p1(), None
569 569
570 570 l = revrange(repo, revs)
571 571
572 572 if len(l) == 0:
573 573 if revs:
574 574 raise util.Abort(_('empty revision range'))
575 575 return repo.dirstate.p1(), None
576 576
577 577 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
578 578 return repo.lookup(l[0]), None
579 579
580 580 return repo.lookup(l[0]), repo.lookup(l[-1])
581 581
582 582 _revrangesep = ':'
583 583
584 584 def revrange(repo, revs):
585 585 """Yield revision as strings from a list of revision specifications."""
586 586
587 587 def revfix(repo, val, defval):
588 588 if not val and val != 0 and defval is not None:
589 589 return defval
590 590 return repo[val].rev()
591 591
592 592 seen, l = set(), []
593 593 for spec in revs:
594 594 if l and not seen:
595 595 seen = set(l)
596 596 # attempt to parse old-style ranges first to deal with
597 597 # things like old-tag which contain query metacharacters
598 598 try:
599 599 if isinstance(spec, int):
600 600 seen.add(spec)
601 601 l.append(spec)
602 602 continue
603 603
604 604 if _revrangesep in spec:
605 605 start, end = spec.split(_revrangesep, 1)
606 606 start = revfix(repo, start, 0)
607 607 end = revfix(repo, end, len(repo) - 1)
608 608 step = start > end and -1 or 1
609 609 if not seen and not l:
610 610 # by far the most common case: revs = ["-1:0"]
611 611 l = range(start, end + step, step)
612 612 # defer syncing seen until next iteration
613 613 continue
614 614 newrevs = set(xrange(start, end + step, step))
615 615 if seen:
616 616 newrevs.difference_update(seen)
617 617 seen.update(newrevs)
618 618 else:
619 619 seen = newrevs
620 620 l.extend(sorted(newrevs, reverse=start > end))
621 621 continue
622 622 elif spec and spec in repo: # single unquoted rev
623 623 rev = revfix(repo, spec, None)
624 624 if rev in seen:
625 625 continue
626 626 seen.add(rev)
627 627 l.append(rev)
628 628 continue
629 629 except error.RepoLookupError:
630 630 pass
631 631
632 632 # fall through to new-style queries if old-style fails
633 633 m = revset.match(repo.ui, spec)
634 dl = [r for r in m(repo, xrange(len(repo))) if r not in seen]
634 dl = [r for r in m(repo, list(repo)) if r not in seen]
635 635 l.extend(dl)
636 636 seen.update(dl)
637 637
638 638 return l
639 639
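Old-style numeric ranges are resolved inline; anything else falls through to revset.match, which after this change receives the subset by iterating the repo instead of via xrange(len(repo)). A sketch, assuming repo has at least six revisions:

    revrange(repo, ['2:5'])             # -> [2, 3, 4, 5], no revset parsing
    revrange(repo, ['heads(default)'])  # falls through to revset.match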
640 640 def expandpats(pats):
641 641 if not util.expandglobs:
642 642 return list(pats)
643 643 ret = []
644 644 for p in pats:
645 645 kind, name = matchmod._patsplit(p, None)
646 646 if kind is None:
647 647 try:
648 648 globbed = glob.glob(name)
649 649 except re.error:
650 650 globbed = [name]
651 651 if globbed:
652 652 ret.extend(globbed)
653 653 continue
654 654 ret.append(p)
655 655 return ret
656 656
657 657 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
658 658 if pats == ("",):
659 659 pats = []
660 660 if not globbed and default == 'relpath':
661 661 pats = expandpats(pats or [])
662 662
663 663 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
664 664 default)
665 665 def badfn(f, msg):
666 666 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
667 667 m.bad = badfn
668 668 return m, pats
669 669
670 670 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
671 671 return matchandpats(ctx, pats, opts, globbed, default)[0]
672 672
673 673 def matchall(repo):
674 674 return matchmod.always(repo.root, repo.getcwd())
675 675
676 676 def matchfiles(repo, files):
677 677 return matchmod.exact(repo.root, repo.getcwd(), files)
678 678
679 679 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
680 680 if dry_run is None:
681 681 dry_run = opts.get('dry_run')
682 682 if similarity is None:
683 683 similarity = float(opts.get('similarity') or 0)
684 684 # we'd use status here, except handling of symlinks and ignore is tricky
685 685 added, unknown, deleted, removed = [], [], [], []
686 686 audit_path = pathauditor(repo.root)
687 687 m = match(repo[None], pats, opts)
688 688 rejected = []
689 689 m.bad = lambda x, y: rejected.append(x)
690 690
691 691 for abs in repo.walk(m):
692 692 target = repo.wjoin(abs)
693 693 good = True
694 694 try:
695 695 audit_path(abs)
696 696 except (OSError, util.Abort):
697 697 good = False
698 698 rel = m.rel(abs)
699 699 exact = m.exact(abs)
700 700 if good and abs not in repo.dirstate:
701 701 unknown.append(abs)
702 702 if repo.ui.verbose or not exact:
703 703 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
704 704 elif (repo.dirstate[abs] != 'r' and
705 705 (not good or not os.path.lexists(target) or
706 706 (os.path.isdir(target) and not os.path.islink(target)))):
707 707 deleted.append(abs)
708 708 if repo.ui.verbose or not exact:
709 709 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
710 710 # for finding renames
711 711 elif repo.dirstate[abs] == 'r':
712 712 removed.append(abs)
713 713 elif repo.dirstate[abs] == 'a':
714 714 added.append(abs)
715 715 copies = {}
716 716 if similarity > 0:
717 717 for old, new, score in similar.findrenames(repo,
718 718 added + unknown, removed + deleted, similarity):
719 719 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
720 720 repo.ui.status(_('recording removal of %s as rename to %s '
721 721 '(%d%% similar)\n') %
722 722 (m.rel(old), m.rel(new), score * 100))
723 723 copies[new] = old
724 724
725 725 if not dry_run:
726 726 wctx = repo[None]
727 727 wlock = repo.wlock()
728 728 try:
729 729 wctx.forget(deleted)
730 730 wctx.add(unknown)
731 731 for new, old in copies.iteritems():
732 732 wctx.copy(old, new)
733 733 finally:
734 734 wlock.release()
735 735
736 736 for f in rejected:
737 737 if f in m.files():
738 738 return 1
739 739 return 0
740 740
741 741 def updatedir(ui, repo, patches, similarity=0):
742 742 '''Update dirstate after patch application according to metadata'''
743 743 if not patches:
744 744 return []
745 745 copies = []
746 746 removes = set()
747 747 cfiles = patches.keys()
748 748 cwd = repo.getcwd()
749 749 if cwd:
750 750 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
751 751 for f in patches:
752 752 gp = patches[f]
753 753 if not gp:
754 754 continue
755 755 if gp.op == 'RENAME':
756 756 copies.append((gp.oldpath, gp.path))
757 757 removes.add(gp.oldpath)
758 758 elif gp.op == 'COPY':
759 759 copies.append((gp.oldpath, gp.path))
760 760 elif gp.op == 'DELETE':
761 761 removes.add(gp.path)
762 762
763 763 wctx = repo[None]
764 764 for src, dst in copies:
765 765 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
766 766 if (not similarity) and removes:
767 767 wctx.remove(sorted(removes), True)
768 768
769 769 for f in patches:
770 770 gp = patches[f]
771 771 if gp and gp.mode:
772 772 islink, isexec = gp.mode
773 773 dst = repo.wjoin(gp.path)
774 774 # patch won't create empty files
775 775 if gp.op == 'ADD' and not os.path.lexists(dst):
776 776 flags = (isexec and 'x' or '') + (islink and 'l' or '')
777 777 repo.wwrite(gp.path, '', flags)
778 778 util.setflags(dst, islink, isexec)
779 779 addremove(repo, cfiles, similarity=similarity)
780 780 files = patches.keys()
781 781 files.extend([r for r in removes if r not in files])
782 782 return sorted(files)
783 783
784 784 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
785 785 """Update the dirstate to reflect the intent of copying src to dst. For
786 786 different reasons it might not end with dst being marked as copied from src.
787 787 """
788 788 origsrc = repo.dirstate.copied(src) or src
789 789 if dst == origsrc: # copying back a copy?
790 790 if repo.dirstate[dst] not in 'mn' and not dryrun:
791 791 repo.dirstate.normallookup(dst)
792 792 else:
793 793 if repo.dirstate[origsrc] == 'a' and origsrc == src:
794 794 if not ui.quiet:
795 795 ui.warn(_("%s has not been committed yet, so no copy "
796 796 "data will be stored for %s.\n")
797 797 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
798 798 if repo.dirstate[dst] in '?r' and not dryrun:
799 799 wctx.add([dst])
800 800 elif not dryrun:
801 801 wctx.copy(origsrc, dst)
802 802
803 803 def readrequires(opener, supported):
804 804 '''Reads and parses .hg/requires and checks if all entries found
805 805 are in the list of supported features.'''
806 806 requirements = set(opener.read("requires").splitlines())
807 807 missings = []
808 808 for r in requirements:
809 809 if r not in supported:
810 810 if not r or not r[0].isalnum():
811 811 raise error.RequirementError(_(".hg/requires file is corrupt"))
812 812 missings.append(r)
813 813 missings.sort()
814 814 if missings:
815 815 raise error.RequirementError(
816 816 _("unknown repository format: requires features '%s' (upgrade "
817 817 "Mercurial)") % "', '".join(missings))
818 818 return requirements
819 819
820 820 class filecacheentry(object):
821 821 def __init__(self, path):
822 822 self.path = path
823 823 self.cachestat = filecacheentry.stat(self.path)
824 824
825 825 if self.cachestat:
826 826 self._cacheable = self.cachestat.cacheable()
827 827 else:
828 828 # None means we don't know yet
829 829 self._cacheable = None
830 830
831 831 def refresh(self):
832 832 if self.cacheable():
833 833 self.cachestat = filecacheentry.stat(self.path)
834 834
835 835 def cacheable(self):
836 836 if self._cacheable is not None:
837 837 return self._cacheable
838 838
839 839 # we don't know yet, assume it is for now
840 840 return True
841 841
842 842 def changed(self):
843 843 # no point in going further if we can't cache it
844 844 if not self.cacheable():
845 845 return True
846 846
847 847 newstat = filecacheentry.stat(self.path)
848 848
849 849 # we may not know if it's cacheable yet, check again now
850 850 if newstat and self._cacheable is None:
851 851 self._cacheable = newstat.cacheable()
852 852
853 853 # check again
854 854 if not self._cacheable:
855 855 return True
856 856
857 857 if self.cachestat != newstat:
858 858 self.cachestat = newstat
859 859 return True
860 860 else:
861 861 return False
862 862
863 863 @staticmethod
864 864 def stat(path):
865 865 try:
866 866 return util.cachestat(path)
867 867 except OSError, e:
868 868 if e.errno != errno.ENOENT:
869 869 raise
870 870
871 871 class filecache(object):
872 872 '''A property-like decorator that tracks a file under .hg/ for updates.
873 873
874 874 Records stat info when called in _filecache.
875 875
876 876 On subsequent calls, compares old stat info with new info, and recreates
877 877 the object when needed, updating the new stat info in _filecache.
878 878
879 879 Mercurial either atomically renames or appends to files under .hg,
880 880 so to ensure the cache is reliable we need the filesystem to be able
881 881 to tell us if a file has been replaced. If it can't, we fall back to
882 882 recreating the object on every call (essentially the same behaviour as
883 883 propertycache).'''
884 884 def __init__(self, path):
885 885 self.path = path
886 886
887 887 def join(self, obj, fname):
888 888 """Used to compute the runtime path of the cached file.
889 889
890 890 Users should subclass filecache and provide their own version of this
891 891 function to call the appropriate join function on 'obj' (an instance
892 892 of the class whose member function was decorated).
893 893 """
894 894 return obj.join(fname)
895 895
896 896 def __call__(self, func):
897 897 self.func = func
898 898 self.name = func.__name__
899 899 return self
900 900
901 901 def __get__(self, obj, type=None):
902 902 # do we need to check if the file changed?
903 903 if self.name in obj.__dict__:
904 904 return obj.__dict__[self.name]
905 905
906 906 entry = obj._filecache.get(self.name)
907 907
908 908 if entry:
909 909 if entry.changed():
910 910 entry.obj = self.func(obj)
911 911 else:
912 912 path = self.join(obj, self.path)
913 913
914 914 # We stat -before- creating the object so our cache doesn't lie if
915 915 # a writer modified between the time we read and stat
916 916 entry = filecacheentry(path)
917 917 entry.obj = self.func(obj)
918 918
919 919 obj._filecache[self.name] = entry
920 920
921 921 obj.__dict__[self.name] = entry.obj
922 922 return entry.obj
923 923
924 924 def __set__(self, obj, value):
925 925 if self.name in obj._filecache:
926 926 obj._filecache[self.name].obj = value # update cached copy
927 927 obj.__dict__[self.name] = value # update copy returned by obj.x
928 928
929 929 def __delete__(self, obj):
930 930 try:
931 931 del obj.__dict__[self.name]
932 932 except KeyError:
933 933 raise AttributeError, self.name
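A minimal sketch of the decorator in use; loadbookmarks and the paths are hypothetical:

    class repolike(object):
        def __init__(self):
            self._filecache = {}  # required by filecache.__get__
        def join(self, fname):
            return '/path/to/repo/.hg/' + fname

        @filecache('bookmarks')
        def bookmarks(self):
            # recomputed only when .hg/bookmarks' stat information changes
            return loadbookmarks(self.join('bookmarks'))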