##// END OF EJS Templates
subrepos: add function for iterating over ctx subrepos
Martin Geisler -
r12176:ecab1082 default
parent child Browse files
Show More
@@ -1,1300 +1,1293 b''
# cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, bin, nullid, nullrev, short
from i18n import _
import os, sys, errno, re, glob, tempfile
import util, templater, patch, error, encoding, templatekw
import match as matchmod
import similar, revset, subrepo
15 15 revrangesep = ':'
16 16
def parsealiases(cmd):
    """Split a command-table key into its list of alias names.

    The leading "^" (which only marks a command for the short help
    listing) is stripped before splitting on "|".
    """
    names = cmd.lstrip("^")
    return names.split("|")
19 19
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}
    for key in table.keys():
        aliases = parsealiases(key)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # allow unambiguous prefix matches in non-strict mode
            for alias in aliases:
                if alias.startswith(cmd):
                    found = alias
                    break
        if found is not None:
            # keep debug commands apart so they never shadow a normal
            # command that shares the same prefix
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[key])
            else:
                choice[found] = (aliases, table[key])

    if not choice and debugchoice:
        choice = debugchoice

    return choice
48 48
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises error.AmbiguousCommand when several commands match and
    error.UnknownCommand when none do.
    """
    choice = findpossible(cmd, table, strict)

    # an exact match always wins over prefix matches
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd)
65 65
def findrepo(p):
    """Walk upwards from directory p looking for a ".hg" directory.

    Return the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            # dirname() stopped making progress: we hit the root
            return None

    return p
73 73
def bail_if_changed(repo):
    """Abort when the working directory has an uncommitted merge or
    any uncommitted changes."""
    if repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise util.Abort(_("outstanding uncommitted changes"))
80 80
def logmessage(opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        # "-" reads the message from stdin, anything else is a filename
        try:
            if logfile == '-':
                message = sys.stdin.read()
            else:
                message = open(logfile).read()
        except IOError as inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
99 99
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # no (or empty) limit given: unlimited
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
113 113
def revpair(repo, revs):
    '''return pair of nodes, given list of revisions. second item can
    be None, meaning use working dir.'''

    def revfix(repo, val, defval):
        # substitute the default when val is empty (but keep literal 0)
        if not val and val != 0 and defval is not None:
            val = defval
        return repo.lookup(val)

    if not revs:
        # default to the first working-directory parent
        return repo.dirstate.parents()[0], None

    end = None
    if len(revs) == 1:
        if revrangesep in revs[0]:
            start, end = revs[0].split(revrangesep, 1)
            start = revfix(repo, start, 0)
            end = revfix(repo, end, len(repo) - 1)
        else:
            start = revfix(repo, revs[0], None)
    elif len(revs) == 2:
        if revrangesep in revs[0] or revrangesep in revs[1]:
            raise util.Abort(_('too many revisions specified'))
        start = revfix(repo, revs[0], None)
        end = revfix(repo, revs[1], None)
    else:
        raise util.Abort(_('too many revisions specified'))
    return start, end
141 141
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # substitute the default when val is empty (but keep literal 0)
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if revrangesep in spec:
                start, end = spec.split(revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l
184 184
def make_filename(repo, pat, node,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand a filename pattern with %-escapes.

    Supported escapes: %% literal, %b repo basename, %H/%h/%R node ids
    and revision (when node is given), %r zero-padded revision, %N/%n
    patch count and (padded) sequence number, %s/%d/%p pathname parts.
    Unknown escapes abort.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        # only register the escapes whose inputs were actually supplied
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        out = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            out.append(c)
            i += 1
        return ''.join(out)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
229 229
def make_file(repo, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return an open file object described by pat.

    An empty pat or "-" maps to stdout/stdin depending on mode; an
    object that already has a suitable write/read method is passed
    through unchanged; anything else is expanded via make_filename and
    opened with the given mode.
    """
    writable = 'w' in mode or 'a' in mode

    if not pat or pat == '-':
        return writable and sys.stdout or sys.stdin
    if hasattr(pat, 'write') and writable:
        return pat
    if hasattr(pat, 'read') and 'r' in mode:
        return pat
    return open(make_filename(repo, pat, node, total, seqno, revwidth,
                              pathname),
                mode)
244 244
def expandpats(pats):
    """Expand shell globs in pats on platforms where the shell does not
    (util.expandglobs); plain copy of the list elsewhere."""
    if not util.expandglobs:
        return list(pats)
    result = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is None:
            # unqualified pattern: let glob have a go at it
            try:
                globbed = glob.glob(name)
            except re.error:
                globbed = [name]
            if globbed:
                result.extend(globbed)
                continue
        # qualified pattern, or glob matched nothing: keep as-is
        result.append(pat)
    return result
261 261
def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a file matcher from command-line patterns and options.

    Messages about bad files are redirected to repo.ui via m.bad.
    """
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])
    m = matchmod.match(repo.root, repo.getcwd(), pats,
                       opts.get('include'), opts.get('exclude'), default,
                       auditor=repo.auditor)
    def badfn(f, msg):
        repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m
272 272
def matchall(repo):
    """Return a matcher that matches every file in the repository."""
    return matchmod.always(repo.root, repo.getcwd())
275 275
def matchfiles(repo, files):
    """Return a matcher that matches exactly the given file list."""
    return matchmod.exact(repo.root, repo.getcwd(), files)
278 278
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    """Add new files, remove missing ones, and record likely renames.

    With similarity > 0, removed/deleted files are compared against
    added/unknown ones and sufficiently similar pairs are recorded as
    copies.  With dry_run, only report what would be done.
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = util.path_auditor(repo.root)
    m = match(repo, pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except:
            # NOTE(review): bare except kept from the original; it also
            # swallows KeyboardInterrupt -- confirm before narrowing.
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.remove(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
331 331
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matched by pats to dest.

    Returns True when any individual copy failed.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # collect (abs, rel, exact) sources for one pattern
        srcs = []
        badstates = after and '?' or '?r'
        m = match(repo, [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = util.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.exists(target)
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        origsrc = repo.dirstate.copied(abssrc) or abssrc
        if abstarget == origsrc: # copying back a copy?
            if state not in 'mn' and not dryrun:
                repo.dirstate.normallookup(abstarget)
        else:
            if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
                if not ui.quiet:
                    ui.warn(_("%s has not been committed yet, so no copy "
                              "data will be stored for %s.\n")
                            % (repo.pathto(origsrc, cwd), reltarget))
                if repo.dirstate[abstarget] in '?r' and not dryrun:
                    wctx.add([abstarget])
            elif not dryrun:
                wctx.copy(origsrc, abstarget)

        if rename and not dryrun:
            wctx.remove([abssrc], not after)

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = util.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = util.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.exists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
544 544
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.'''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the detached child: tell the parent we started OK by
        # removing the lock file, then detach stdio
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
616 616
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.'''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # write one changeset as a patch, to fp or a per-patch file
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        if not fp:
            fp = make_file(repo, template, node, total=total, seqno=seqno,
                           revwidth=revwidth, mode='ab')
        if fp != sys.stdout and hasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
656 656
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.'''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if stat:
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = util.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if node2 is not None:
                node2 = bin(ctx2.substate[subpath][1])
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, node2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
699 692
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        self.header = {}      # rev -> buffered header text
        self.hunk = {}        # rev -> buffered changeset text
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        """Write any buffered output for rev; return 1 if something
        was flushed, 0 otherwise."""
        if rev in self.header:
            h = self.header[rev]
            # only emit a header when it differs from the previous one
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset:   %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            branch = encoding.tolocal(branch)
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag:         %s\n") % tag,
                          label='log.tag')
        for parent in parents:
            self.ui.write(_("parent:      %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
853 846
854 847
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError as inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError as inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
956 949
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    # 'patch' doubles as the match object used to select files for diff
    # output; False means "no patch requested".
    patch = False
    if opts.get('patch') or opts.get('stat'):
        patch = matchall(repo)

    tmpl = opts.get('template')
    style = None
    if tmpl:
        tmpl = templater.parsestring(tmpl, quoted=False)
    else:
        style = opts.get('style')

    # ui settings (only consulted when neither command-line option is set)
    if not (tmpl or style):
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            tmpl = templater.parsestring(tmpl)
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not (tmpl or style):
        return changeset_printer(ui, repo, patch, opts, buffered)

    mapfile = None
    if style and not tmpl:
        mapfile = style
        # a bare style name (no directory part) is looked up in the
        # shipped template path, preferring the 'map-cmdline.' variant
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname

    try:
        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    if tmpl:
        t.use_template(tmpl)
    return t
1007 1000
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec.

    Returns the revision number as a string; raises util.Abort when no
    revision matches.
    """
    df = util.matchdate(date)
    m = matchall(repo)
    results = {}

    # record the date of every walked changeset that satisfies the spec
    def prep(ctx, fns):
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    # walkchangerevs yields newest-first here, so the first hit is tipmost
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(results[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1028 1021
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    # yield (start, length) windows that double in size up to sizelimit,
    # walking either forwards (start < end) or backwards (start > end)
    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        if start < end:
            while start < end:
                yield start, min(windowsize, end - start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start - end - 1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    follow = opts.get('follow') or opts.get('follow_first')

    if not len(repo):
        return []

    if follow:
        defrange = '%s:0' % repo['.'].rev()
    else:
        defrange = '-1:0'
    revs = revrange(repo, opts['rev'] or [defrange])
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = util.cachefunc(repo.changectx)

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns.  Display all revs.
        wanted = set(revs)
    copies = []

    if not slowpath:
        # We only have to read through the filelog to find wanted revisions

        minrev, maxrev = min(revs), max(revs)
        def filerevgen(filelog, last):
            """
            Only files, no patterns.  Check the history of each file.

            Examines filelog entries within minrev, maxrev linkrev range
            Returns an iterator yielding (linkrev, parentlinkrevs, copied)
            tuples in backwards order
            """
            cl_count = len(repo)
            revs = []
            for j in xrange(0, last + 1):
                linkrev = filelog.linkrev(j)
                if linkrev < minrev:
                    continue
                # only yield rev for which we have the changelog, it can
                # happen while doing "hg log" during a pull or commit
                if linkrev > maxrev or linkrev >= cl_count:
                    break

                parentlinkrevs = []
                for p in filelog.parentrevs(j):
                    if p != nullrev:
                        parentlinkrevs.append(filelog.linkrev(p))
                n = filelog.node(j)
                revs.append((linkrev, parentlinkrevs,
                             follow and filelog.renamed(n)))

            return reversed(revs)
        def iterfiles():
            # explicitly requested files first, then any copy sources
            # discovered while following renames
            for filename in match.files():
                yield filename, None
            for filename_node in copies:
                yield filename_node
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if not len(filelog):
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    if follow:
                        raise util.Abort(
                            _('cannot follow nonexistent file: "%s"') % file_)
                    slowpath = True
                    break
                else:
                    continue

            if node is None:
                last = len(filelog) - 1
            else:
                last = filelog.rev(node)


            # keep track of all ancestors of the file
            ancestors = set([filelog.linkrev(last)])

            # iterate from latest to oldest revision
            for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

                fncache.setdefault(rev, []).append(file_)
                wanted.add(rev)
                if copied:
                    copies.append(copied)
    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        for i in sorted(revs):
            ctx = change(i)
            matches = filter(match, ctx.files())
            if matches:
                fncache[i] = matches
                wanted.add(i)

    # stateful predicate that keeps only revisions related (ancestor or
    # descendant) to the first revision it is asked about
    class followfilter(object):
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo.changelog.rev(repo.lookup(rev))
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            # first pass: forward order, let the caller gather data
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass: requested order, yield the contexts
            for rev in nrevs:
                yield change(rev)
    return iterate()
1246 1239
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Parses the date option, resolves the commit message, optionally runs
    addremove, and delegates the actual commit to commitfunc.  Returns
    whatever commitfunc returns (typically the new changeset node).
    '''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        addremove(repo, pats, opts)

    return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1260 1253
def commiteditor(repo, ctx, subs):
    """Return the commit message for ctx, invoking the editor only when
    the context does not already carry a description."""
    existing = ctx.description()
    if existing:
        return existing
    return commitforceeditor(repo, ctx, subs)
1265 1258
def commitforceeditor(repo, ctx, subs):
    """Always run the user's editor to obtain a commit message for ctx.

    Builds the editor buffer from the existing description plus 'HG:'
    hint lines (user, branch, subrepos, file lists), strips the hint
    lines from the result, and aborts on an empty message.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'")
                        % encoding.tolocal(ctx.branch()))
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root
    # NOTE(review): the chdir pair is not exception-safe; if the editor
    # raises, the process stays in repo.root -- consider try/finally.
    olddir = os.getcwd()
    os.chdir(repo.root)
    text = repo.ui.edit("\n".join(edittext), ctx.user())
    # drop every hint line the template inserted above
    text = re.sub("(?m)^HG:.*\n", "", text)
    os.chdir(olddir)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
@@ -1,1870 +1,1863 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25 25
    def __init__(self, baseui, path=None, create=0):
        """Open the repository at path, creating it first when create is
        true.  Reads .hg/hgrc, loads extensions, checks the 'requires'
        file against self.supported, honours .hg/sharedpath, and sets up
        the store openers and the various in-memory caches."""
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing hgrc is fine; run with the base configuration
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                # older repos have no 'requires' file; anything else is fatal
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        # shared repositories point their store at another repo's .hg
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
114 114
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        Returns True only when path lies under self.root and some prefix
        of it appears in the working copy's substate (delegating to the
        subrepo's own checknested for deeper nesting).
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter prefix
                parts.pop()
        return False
151 151
152 152
    @propertycache
    def changelog(self):
        """The repository changelog (computed once, then cached).

        Honours HG_PENDING so hooks running during a transaction can see
        not-yet-committed revisions via 00changelog.i.a.
        """
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c
162 162
    @propertycache
    def manifest(self):
        """The repository manifest revlog (computed once, then cached)."""
        return manifest.manifest(self.sopener)
166 166
    @propertycache
    def dirstate(self):
        """The working directory dirstate (computed once, then cached)."""
        return dirstate.dirstate(self.opener, self.ui, self.root)
170 170
171 171 def __getitem__(self, changeid):
172 172 if changeid is None:
173 173 return context.workingctx(self)
174 174 return context.changectx(self, changeid)
175 175
176 176 def __contains__(self, changeid):
177 177 try:
178 178 return bool(self.lookup(changeid))
179 179 except error.RepoLookupError:
180 180 return False
181 181
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True
184 184
    def __len__(self):
        # number of revisions == length of the changelog
        return len(self.changelog)
187 187
188 188 def __iter__(self):
189 189 for i in xrange(len(self)):
190 190 yield i
191 191
192 192 def url(self):
193 193 return 'file:' + self.root
194 194
    def hook(self, name, throw=False, **args):
        """Run the named hook; with throw=True a failing hook raises."""
        return hook.hook(self.ui, self, name, throw, **args)
197 197
198 198 tag_disallowed = ':\r\n'
199 199
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging: write names->node to .hg/localtags (local)
        or .hgtags (global, committing the change).  Runs the 'pretag'
        and 'tag' hooks.  Returns the tagging changeset node for global
        tags, None for local ones.

        NOTE(review): the mutable default 'extra={}' is harmless here
        because the dict is only passed through, never mutated.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines; for an already-known tag, first record
            # its old node so readers can reconstruct the history
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
265 265
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag when .hgtags itself has uncommitted changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
294 294
295 295 def tags(self):
296 296 '''return a mapping of tag to node'''
297 297 if self._tags is None:
298 298 (self._tags, self._tagtypes) = self._findtags()
299 299
300 300 return self._tags
301 301
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
334 334
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # ensure the tag caches are populated before consulting them
        self.tags()

        return self._tagtypes.get(tagname)
347 347
348 348 def tagslist(self):
349 349 '''return a list of tags ordered by revision'''
350 350 l = []
351 351 for t, n in self.tags().iteritems():
352 352 try:
353 353 r = self.changelog.rev(n)
354 354 except:
355 355 r = -2 # sort to the beginning of the list if unknown
356 356 l.append((r, t, n))
357 357 return [(t, n) for r, t, n in sorted(l)]
358 358
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            # build the reverse node -> [tags] mapping once
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])
368 368
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # extend the partial branch cache from lrev up to tip and
        # persist it; returns the updated mapping
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
378 378
    def updatebranchcache(self):
        """Bring the in-memory branch head cache up to date with tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (e.g. after a strip): reload from disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
395 395
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
400 400
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            # prefer the newest head that is not marked 'close'
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
413 413
414 414
    def _readbranchcache(self):
        """Load .hg/branchheads.cache.

        Returns (partial, last, lrev): the branch -> heads mapping plus
        the tip node/rev the cache was valid for; on any problem, an
        empty state is returned so callers rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is non-fatal; fall back to an empty state
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
442 442
    def _writebranchcache(self, branches, tip, tiprev):
        """Persist the branch -> heads mapping to .hg/branchheads.cache.

        Best effort: I/O errors are ignored since the cache can always
        be rebuilt.
        """
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
453 453
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from ctxgen into the branch head cache
        'partial' (mutated in place)."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
477 477
    def lookup(self, key):
        """Resolve key (rev number, '.', 'null', 'tip', node, tag,
        branch name, or node prefix) to a binary changelog node.

        Raises error.RepoLookupError for unknown revisions and
        error.Abort when the key is a dirstate parent missing from the
        changelog (damaged dirstate).
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            # NOTE(review): bare except -- presumably guarding len()/hex()
            # on non-string keys (TypeError); consider narrowing.
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
508 508
    def lookupbranch(self, key, remote=None):
        """Return the branch name key refers to, looking in remote when
        given; a key naming an existing branch is returned as-is."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # otherwise resolve key as a revision and report its branch
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
516 516
    def local(self):
        # this is a local (on-disk) repository, not a remote peer
        return True
519 519
    def join(self, f):
        """Return f joined under the .hg directory."""
        return os.path.join(self.path, f)
522 522
    def wjoin(self, f):
        """Return f joined under the working directory root."""
        return os.path.join(self.root, f)
525 525
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
530 530
    def changectx(self, changeid):
        """Return the change context for changeid (same as self[changeid])."""
        return self[changeid]
533 533
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
537 537
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
542 542
    def getcwd(self):
        """Return the current working directory relative to the repo root."""
        return self.dirstate.getcwd()
545 545
    def pathto(self, f, cwd=None):
        """Return repo-relative path f expressed relative to cwd."""
        return self.dirstate.pathto(f, cwd)
548 548
    def wfile(self, f, mode='r'):
        """Open file f from the working directory with the given mode."""
        return self.wopener(f, mode)
551 551
    def _link(self, f):
        """True when working-directory file f is a symbolic link."""
        return os.path.islink(self.wjoin(f))
554 554
    def _loadfilter(self, filter):
        # Parse the hgrc section named by ``filter`` (e.g. [encode] or
        # [decode]) into self.filterpats once, caching the result for
        # later use by _filter().
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a pattern inherited from another hgrc
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # command names a registered in-process filter;
                        # the remainder of the line becomes its params
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter: run the command as a shell pipe
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
577 577
578 578 def _filter(self, filter, filename, data):
579 579 self._loadfilter(filter)
580 580
581 581 for mf, fn, cmd in self.filterpats[filter]:
582 582 if mf(filename):
583 583 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
584 584 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
585 585 break
586 586
587 587 return data
588 588
589 589 def adddatafilter(self, name, filter):
590 590 self._datafilters[name] = filter
591 591
592 592 def wread(self, filename):
593 593 if self._link(filename):
594 594 data = os.readlink(self.wjoin(filename))
595 595 else:
596 596 data = self.wopener(filename, 'r').read()
597 597 return self._filter("encode", filename, data)
598 598
599 599 def wwrite(self, filename, data, flags):
600 600 data = self._filter("decode", filename, data)
601 601 try:
602 602 os.unlink(self.wjoin(filename))
603 603 except OSError:
604 604 pass
605 605 if 'l' in flags:
606 606 self.wopener.symlink(data, filename)
607 607 else:
608 608 self.wopener(filename, 'w').write(data)
609 609 if 'x' in flags:
610 610 util.set_flags(self.wjoin(filename), False, True)
611 611
612 612 def wwritedata(self, filename, data):
613 613 return self._filter("decode", filename, data)
614 614
    def transaction(self, desc):
        """Open a store transaction, or nest into the one already running.

        ``desc`` is saved in journal.desc so a later rollback can report
        what it is undoing.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # join the active transaction instead of opening a new one
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # after a successful close, the journal files are renamed to
        # undo files; that is what makes a later rollback possible
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weak reference so an abandoned tr can be collected
        self._transref = weakref.ref(tr)
        return tr
644 644
    def recover(self):
        """Roll back an interrupted transaction's journal, if present.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # cached changelog/manifest are now stale
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
659 659
    def rollback(self, dryrun=False):
        """Undo the last transaction using the saved undo files.

        With ``dryrun`` set, only report what would be rolled back.
        Returns 1 when no rollback information is available.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc holds "<len(repo)>\n<source>\n[<desc>]"
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop all caches that may reference undone history
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
698 698
699 699 def invalidatecaches(self):
700 700 self._tags = None
701 701 self._tagtypes = None
702 702 self.nodetagscache = None
703 703 self._branchcache = None # in UTF-8
704 704 self._branchcachetip = None
705 705
706 706 def invalidate(self):
707 707 for a in "changelog manifest".split():
708 708 if a in self.__dict__:
709 709 delattr(self, a)
710 710 self.invalidatecaches()
711 711
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        # Try a non-blocking acquire first; on contention either raise
        # immediately (wait=False) or warn and retry with a timeout.
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
726 726
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the lock we already hold (bumps its refcount)
            l.lock()
            return l

        # invalidate caches on acquire: another process may have
        # modified the store while we were unlocked
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
740 740
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the lock we already hold (bumps its refcount)
            l.lock()
            return l

        # write the dirstate on release, re-read it on acquire
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
755 755
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Appends the file to ``changelist`` when it changed and returns
        the filelog node to record in the new manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
830 830
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories and reject bad patterns so we
            # can verify explicit file arguments below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit (and not closing a branch head, merging,
            # or switching branch): bail out early
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                      "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # on failure, point the user at the saved message
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
958 958
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the new changelog node.  With ``error`` set, IOErrors
        while reading files abort the commit instead of silently
        dropping the file.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    # a missing file is treated as removed unless the
                    # caller asked for strict error reporting
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let the pretxncommit hook see the pending changelog write
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1026 1026
1027 1027 def destroyed(self):
1028 1028 '''Inform the repository that nodes have been destroyed.
1029 1029 Intended for use by strip and rollback, so there's a common
1030 1030 place for anything that has to be done after destroying history.'''
1031 1031 # XXX it might be nice if we could take the list of destroyed
1032 1032 # nodes, but I don't see an easy way for rollback() to do that
1033 1033
1034 1034 # Ensure the persistent tag cache is updated. Doing it now
1035 1035 # means that the tag cache only has to worry about destroyed
1036 1036 # heads immediately after a strip/rollback. That in turn
1037 1037 # guarantees that "cachetip == currenttip" (comparing both rev
1038 1038 # and node) always means no nodes have been added or destroyed.
1039 1039
1040 1040 # XXX this is suboptimal when qrefresh'ing: we strip the current
1041 1041 # head, refresh the tag cache, then immediately add a new head.
1042 1042 # But I think doing it this way is necessary for the "instant
1043 1043 # tag cache retrieval" case to work.
1044 1044 self.invalidatecaches()
1045 1045
1046 1046 def walk(self, match, node=None):
1047 1047 '''
1048 1048 walk recursively through the directory tree or a given
1049 1049 changeset, finding all files matched by the match
1050 1050 function
1051 1051 '''
1052 1052 return self[node].walk(match)
1053 1053
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of file lists:
        (modified, added, removed, deleted, unknown, ignored, clean),
        each sorted.  With ``listsubrepos`` set, subrepo files are
        included with a "subpath/" prefix.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk the second manifest, classifying each file against
            # the first; whatever remains in mf1 afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r
1189 1182
1190 1183 def heads(self, start=None):
1191 1184 heads = self.changelog.heads(start)
1192 1185 # sort the output in rev descending order
1193 1186 heads = [(-self.changelog.rev(h), h) for h in heads]
1194 1187 return [n for (r, n) in sorted(heads)]
1195 1188
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # drop heads whose changeset carries the 'close' extra
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1219 1212
1220 1213 def branches(self, nodes):
1221 1214 if not nodes:
1222 1215 nodes = [self.changelog.tip()]
1223 1216 b = []
1224 1217 for n in nodes:
1225 1218 t = n
1226 1219 while 1:
1227 1220 p = self.changelog.parents(n)
1228 1221 if p[1] != nullid or p[0] == nullid:
1229 1222 b.append((t, n, p[0], p[1]))
1230 1223 break
1231 1224 n = p[0]
1232 1225 return b
1233 1226
1234 1227 def between(self, pairs):
1235 1228 r = []
1236 1229
1237 1230 for top, bottom in pairs:
1238 1231 n, l, i = top, [], 0
1239 1232 f = 1
1240 1233
1241 1234 while n != bottom and n != nullid:
1242 1235 p = self.changelog.parents(n)[0]
1243 1236 if i == f:
1244 1237 l.append(n)
1245 1238 f = f * 2
1246 1239 n = p
1247 1240 i += 1
1248 1241
1249 1242 r.append(l)
1250 1243
1251 1244 return r
1252 1245
    def pull(self, remote, heads=None, force=False):
        """Pull changes from ``remote`` into this repository.

        ``heads`` limits the pull to ancestors of those heads.  Returns
        0 when there is nothing to pull, otherwise the return value of
        addchangegroup().
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()
1280 1273
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - 0 means HTTP error *or* nothing to push
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()
1325 1318
1326 1319 def changegroupinfo(self, nodes, source):
1327 1320 if self.ui.verbose or source == 'bundle':
1328 1321 self.ui.status(_("%d changesets found\n") % len(nodes))
1329 1322 if self.ui.debugflag:
1330 1323 self.ui.debug("list of changesets:\n")
1331 1324 for node in nodes:
1332 1325 self.ui.debug("%s\n" % hex(node))
1333 1326
1334 1327 def changegroupsubset(self, bases, heads, source, extranodes=None):
1335 1328 """Compute a changegroup consisting of all the nodes that are
1336 1329 descendents of any of the bases and ancestors of any of the heads.
1337 1330 Return a chunkbuffer object whose read() method will return
1338 1331 successive changegroup chunks.
1339 1332
1340 1333 It is fairly complex as determining which filenodes and which
1341 1334 manifest nodes need to be included for the changeset to be complete
1342 1335 is non-trivial.
1343 1336
1344 1337 Another wrinkle is doing the reverse, figuring out which changeset in
1345 1338 the changegroup a particular filenode or manifestnode belongs to.
1346 1339
1347 1340 The caller can specify some nodes that must be included in the
1348 1341 changegroup using the extranodes argument. It should be a dict
1349 1342 where the keys are the filenames (or 1 for the manifest), and the
1350 1343 values are lists of (node, linknode) tuples, where node is a wanted
1351 1344 node and linknode is the changelog node that should be transmitted as
1352 1345 the linkrev.
1353 1346 """
1354 1347
1355 1348 # Set up some initial variables
1356 1349 # Make it easy to refer to self.changelog
1357 1350 cl = self.changelog
1358 1351 # Compute the list of changesets in this changegroup.
1359 1352 # Some bases may turn out to be superfluous, and some heads may be
1360 1353 # too. nodesbetween will return the minimal set of bases and heads
1361 1354 # necessary to re-create the changegroup.
1362 1355 if not bases:
1363 1356 bases = [nullid]
1364 1357 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1365 1358
1366 1359 if extranodes is None:
1367 1360 # can we go through the fast path ?
1368 1361 heads.sort()
1369 1362 allheads = self.heads()
1370 1363 allheads.sort()
1371 1364 if heads == allheads:
1372 1365 return self._changegroup(msng_cl_lst, source)
1373 1366
1374 1367 # slow path
1375 1368 self.hook('preoutgoing', throw=True, source=source)
1376 1369
1377 1370 self.changegroupinfo(msng_cl_lst, source)
1378 1371
1379 1372 # We assume that all ancestors of bases are known
1380 1373 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1381 1374
1382 1375 # Make it easy to refer to self.manifest
1383 1376 mnfst = self.manifest
1384 1377 # We don't know which manifests are missing yet
1385 1378 msng_mnfst_set = {}
1386 1379 # Nor do we know which filenodes are missing.
1387 1380 msng_filenode_set = {}
1388 1381
1389 1382 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1390 1383 junk = None
1391 1384
1392 1385 # A changeset always belongs to itself, so the changenode lookup
1393 1386 # function for a changenode is identity.
1394 1387 def identity(x):
1395 1388 return x
1396 1389
1397 1390 # A function generating function that sets up the initial environment
1398 1391 # the inner function.
1399 1392 def filenode_collector(changedfiles):
1400 1393 # This gathers information from each manifestnode included in the
1401 1394 # changegroup about which filenodes the manifest node references
1402 1395 # so we can include those in the changegroup too.
1403 1396 #
1404 1397 # It also remembers which changenode each filenode belongs to. It
1405 1398 # does this by assuming the a filenode belongs to the changenode
1406 1399 # the first manifest that references it belongs to.
1407 1400 def collect_msng_filenodes(mnfstnode):
1408 1401 r = mnfst.rev(mnfstnode)
1409 1402 if r - 1 in mnfst.parentrevs(r):
1410 1403 # If the previous rev is one of the parents,
1411 1404 # we only need to see a diff.
1412 1405 deltamf = mnfst.readdelta(mnfstnode)
1413 1406 # For each line in the delta
1414 1407 for f, fnode in deltamf.iteritems():
1415 1408 # And if the file is in the list of files we care
1416 1409 # about.
1417 1410 if f in changedfiles:
1418 1411 # Get the changenode this manifest belongs to
1419 1412 clnode = msng_mnfst_set[mnfstnode]
1420 1413 # Create the set of filenodes for the file if
1421 1414 # there isn't one already.
1422 1415 ndset = msng_filenode_set.setdefault(f, {})
1423 1416 # And set the filenode's changelog node to the
1424 1417 # manifest's if it hasn't been set already.
1425 1418 ndset.setdefault(fnode, clnode)
1426 1419 else:
1427 1420 # Otherwise we need a full manifest.
1428 1421 m = mnfst.read(mnfstnode)
1429 1422 # For every file in we care about.
1430 1423 for f in changedfiles:
1431 1424 fnode = m.get(f, None)
1432 1425 # If it's in the manifest
1433 1426 if fnode is not None:
1434 1427 # See comments above.
1435 1428 clnode = msng_mnfst_set[mnfstnode]
1436 1429 ndset = msng_filenode_set.setdefault(f, {})
1437 1430 ndset.setdefault(fnode, clnode)
1438 1431 return collect_msng_filenodes
1439 1432
1440 1433 # If we determine that a particular file or manifest node must be a
1441 1434 # node that the recipient of the changegroup will already have, we can
1442 1435 # also assume the recipient will have all the parents. This function
1443 1436 # prunes them from the set of missing nodes.
1444 1437 def prune(revlog, missingnodes):
1445 1438 hasset = set()
1446 1439 # If a 'missing' filenode thinks it belongs to a changenode we
1447 1440 # assume the recipient must have, then the recipient must have
1448 1441 # that filenode.
1449 1442 for n in missingnodes:
1450 1443 clrev = revlog.linkrev(revlog.rev(n))
1451 1444 if clrev in commonrevs:
1452 1445 hasset.add(n)
1453 1446 for n in hasset:
1454 1447 missingnodes.pop(n, None)
1455 1448 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1456 1449 missingnodes.pop(revlog.node(r), None)
1457 1450
1458 1451 # Add the nodes that were explicitly requested.
1459 1452 def add_extra_nodes(name, nodes):
1460 1453 if not extranodes or name not in extranodes:
1461 1454 return
1462 1455
1463 1456 for node, linknode in extranodes[name]:
1464 1457 if node not in nodes:
1465 1458 nodes[node] = linknode
1466 1459
1467 1460 # Now that we have all theses utility functions to help out and
1468 1461 # logically divide up the task, generate the group.
1469 1462 def gengroup():
1470 1463 # The set of changed files starts empty.
1471 1464 changedfiles = set()
1472 1465 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1473 1466
1474 1467 # Create a changenode group generator that will call our functions
1475 1468 # back to lookup the owning changenode and collect information.
1476 1469 group = cl.group(msng_cl_lst, identity, collect)
1477 1470 for cnt, chnk in enumerate(group):
1478 1471 yield chnk
1479 1472 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1480 1473 self.ui.progress(_('bundling changes'), None)
1481 1474
1482 1475 prune(mnfst, msng_mnfst_set)
1483 1476 add_extra_nodes(1, msng_mnfst_set)
1484 1477 msng_mnfst_lst = msng_mnfst_set.keys()
1485 1478 # Sort the manifestnodes by revision number.
1486 1479 msng_mnfst_lst.sort(key=mnfst.rev)
1487 1480 # Create a generator for the manifestnodes that calls our lookup
1488 1481 # and data collection functions back.
1489 1482 group = mnfst.group(msng_mnfst_lst,
1490 1483 lambda mnode: msng_mnfst_set[mnode],
1491 1484 filenode_collector(changedfiles))
1492 1485 for cnt, chnk in enumerate(group):
1493 1486 yield chnk
1494 1487 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1495 1488 self.ui.progress(_('bundling manifests'), None)
1496 1489
1497 1490 # These are no longer needed, dereference and toss the memory for
1498 1491 # them.
1499 1492 msng_mnfst_lst = None
1500 1493 msng_mnfst_set.clear()
1501 1494
1502 1495 if extranodes:
1503 1496 for fname in extranodes:
1504 1497 if isinstance(fname, int):
1505 1498 continue
1506 1499 msng_filenode_set.setdefault(fname, {})
1507 1500 changedfiles.add(fname)
1508 1501 # Go through all our files in order sorted by name.
1509 1502 cnt = 0
1510 1503 for fname in sorted(changedfiles):
1511 1504 filerevlog = self.file(fname)
1512 1505 if not len(filerevlog):
1513 1506 raise util.Abort(_("empty or missing revlog for %s") % fname)
1514 1507 # Toss out the filenodes that the recipient isn't really
1515 1508 # missing.
1516 1509 missingfnodes = msng_filenode_set.pop(fname, {})
1517 1510 prune(filerevlog, missingfnodes)
1518 1511 add_extra_nodes(fname, missingfnodes)
1519 1512 # If any filenodes are left, generate the group for them,
1520 1513 # otherwise don't bother.
1521 1514 if missingfnodes:
1522 1515 yield changegroup.chunkheader(len(fname))
1523 1516 yield fname
1524 1517 # Sort the filenodes by their revision # (topological order)
1525 1518 nodeiter = list(missingfnodes)
1526 1519 nodeiter.sort(key=filerevlog.rev)
1527 1520 # Create a group generator and only pass in a changenode
1528 1521 # lookup function as we need to collect no information
1529 1522 # from filenodes.
1530 1523 group = filerevlog.group(nodeiter,
1531 1524 lambda fnode: missingfnodes[fnode])
1532 1525 for chnk in group:
1533 1526 self.ui.progress(
1534 1527 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1535 1528 cnt += 1
1536 1529 yield chnk
1537 1530 # Signal that no more groups are left.
1538 1531 yield changegroup.closechunk()
1539 1532 self.ui.progress(_('bundling files'), None)
1540 1533
1541 1534 if msng_cl_lst:
1542 1535 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1543 1536
1544 1537 return util.chunkbuffer(gengroup())
1545 1538
1546 1539 def changegroup(self, basenodes, source):
1547 1540 # to avoid a race we use changegroupsubset() (issue1320)
1548 1541 return self.changegroupsubset(basenodes, self.heads(), source)
1549 1542
1550 1543 def _changegroup(self, nodes, source):
1551 1544 """Compute the changegroup of all nodes that we have that a recipient
1552 1545 doesn't. Return a chunkbuffer object whose read() method will return
1553 1546 successive changegroup chunks.
1554 1547
1555 1548 This is much easier than the previous function as we can assume that
1556 1549 the recipient has any changenode we aren't sending them.
1557 1550
1558 1551 nodes is the set of nodes to send"""
1559 1552
1560 1553 self.hook('preoutgoing', throw=True, source=source)
1561 1554
1562 1555 cl = self.changelog
1563 1556 revset = set([cl.rev(n) for n in nodes])
1564 1557 self.changegroupinfo(nodes, source)
1565 1558
1566 1559 def identity(x):
1567 1560 return x
1568 1561
1569 1562 def gennodelst(log):
1570 1563 for r in log:
1571 1564 if log.linkrev(r) in revset:
1572 1565 yield log.node(r)
1573 1566
1574 1567 def lookuplinkrev_func(revlog):
1575 1568 def lookuplinkrev(n):
1576 1569 return cl.node(revlog.linkrev(revlog.rev(n)))
1577 1570 return lookuplinkrev
1578 1571
1579 1572 def gengroup():
1580 1573 '''yield a sequence of changegroup chunks (strings)'''
1581 1574 # construct a list of all changed files
1582 1575 changedfiles = set()
1583 1576 mmfs = {}
1584 1577 collect = changegroup.collector(cl, mmfs, changedfiles)
1585 1578
1586 1579 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1587 1580 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1588 1581 yield chnk
1589 1582 self.ui.progress(_('bundling changes'), None)
1590 1583
1591 1584 mnfst = self.manifest
1592 1585 nodeiter = gennodelst(mnfst)
1593 1586 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1594 1587 lookuplinkrev_func(mnfst))):
1595 1588 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1596 1589 yield chnk
1597 1590 self.ui.progress(_('bundling manifests'), None)
1598 1591
1599 1592 cnt = 0
1600 1593 for fname in sorted(changedfiles):
1601 1594 filerevlog = self.file(fname)
1602 1595 if not len(filerevlog):
1603 1596 raise util.Abort(_("empty or missing revlog for %s") % fname)
1604 1597 nodeiter = gennodelst(filerevlog)
1605 1598 nodeiter = list(nodeiter)
1606 1599 if nodeiter:
1607 1600 yield changegroup.chunkheader(len(fname))
1608 1601 yield fname
1609 1602 lookup = lookuplinkrev_func(filerevlog)
1610 1603 for chnk in filerevlog.group(nodeiter, lookup):
1611 1604 self.ui.progress(
1612 1605 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1613 1606 cnt += 1
1614 1607 yield chnk
1615 1608 self.ui.progress(_('bundling files'), None)
1616 1609
1617 1610 yield changegroup.closechunk()
1618 1611
1619 1612 if nodes:
1620 1613 self.hook('outgoing', node=hex(nodes[0]), source=source)
1621 1614
1622 1615 return util.chunkbuffer(gengroup())
1623 1616
1624 1617 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1625 1618 """Add the changegroup returned by source.read() to this repo.
1626 1619 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1627 1620 the URL of the repo where this changegroup is coming from.
1628 1621
1629 1622 Return an integer summarizing the change to this repo:
1630 1623 - nothing changed or no source: 0
1631 1624 - more heads than before: 1+added heads (2..n)
1632 1625 - fewer heads than before: -1-removed heads (-2..-n)
1633 1626 - number of heads stays the same: 1
1634 1627 """
1635 1628 def csmap(x):
1636 1629 self.ui.debug("add changeset %s\n" % short(x))
1637 1630 return len(cl)
1638 1631
1639 1632 def revmap(x):
1640 1633 return cl.rev(x)
1641 1634
1642 1635 if not source:
1643 1636 return 0
1644 1637
1645 1638 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1646 1639
1647 1640 changesets = files = revisions = 0
1648 1641 efiles = set()
1649 1642
1650 1643 # write changelog data to temp files so concurrent readers will not see
1651 1644 # inconsistent view
1652 1645 cl = self.changelog
1653 1646 cl.delayupdate()
1654 1647 oldheads = len(cl.heads())
1655 1648
1656 1649 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1657 1650 try:
1658 1651 trp = weakref.proxy(tr)
1659 1652 # pull off the changeset group
1660 1653 self.ui.status(_("adding changesets\n"))
1661 1654 clstart = len(cl)
1662 1655 class prog(object):
1663 1656 step = _('changesets')
1664 1657 count = 1
1665 1658 ui = self.ui
1666 1659 total = None
1667 1660 def __call__(self):
1668 1661 self.ui.progress(self.step, self.count, unit=_('chunks'),
1669 1662 total=self.total)
1670 1663 self.count += 1
1671 1664 pr = prog()
1672 1665 chunkiter = changegroup.chunkiter(source, progress=pr)
1673 1666 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1674 1667 raise util.Abort(_("received changelog group is empty"))
1675 1668 clend = len(cl)
1676 1669 changesets = clend - clstart
1677 1670 for c in xrange(clstart, clend):
1678 1671 efiles.update(self[c].files())
1679 1672 efiles = len(efiles)
1680 1673 self.ui.progress(_('changesets'), None)
1681 1674
1682 1675 # pull off the manifest group
1683 1676 self.ui.status(_("adding manifests\n"))
1684 1677 pr.step = _('manifests')
1685 1678 pr.count = 1
1686 1679 pr.total = changesets # manifests <= changesets
1687 1680 chunkiter = changegroup.chunkiter(source, progress=pr)
1688 1681 # no need to check for empty manifest group here:
1689 1682 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1690 1683 # no new manifest will be created and the manifest group will
1691 1684 # be empty during the pull
1692 1685 self.manifest.addgroup(chunkiter, revmap, trp)
1693 1686 self.ui.progress(_('manifests'), None)
1694 1687
1695 1688 needfiles = {}
1696 1689 if self.ui.configbool('server', 'validate', default=False):
1697 1690 # validate incoming csets have their manifests
1698 1691 for cset in xrange(clstart, clend):
1699 1692 mfest = self.changelog.read(self.changelog.node(cset))[0]
1700 1693 mfest = self.manifest.readdelta(mfest)
1701 1694 # store file nodes we must see
1702 1695 for f, n in mfest.iteritems():
1703 1696 needfiles.setdefault(f, set()).add(n)
1704 1697
1705 1698 # process the files
1706 1699 self.ui.status(_("adding file changes\n"))
1707 1700 pr.step = 'files'
1708 1701 pr.count = 1
1709 1702 pr.total = efiles
1710 1703 while 1:
1711 1704 f = changegroup.getchunk(source)
1712 1705 if not f:
1713 1706 break
1714 1707 self.ui.debug("adding %s revisions\n" % f)
1715 1708 pr()
1716 1709 fl = self.file(f)
1717 1710 o = len(fl)
1718 1711 chunkiter = changegroup.chunkiter(source)
1719 1712 if fl.addgroup(chunkiter, revmap, trp) is None:
1720 1713 raise util.Abort(_("received file revlog group is empty"))
1721 1714 revisions += len(fl) - o
1722 1715 files += 1
1723 1716 if f in needfiles:
1724 1717 needs = needfiles[f]
1725 1718 for new in xrange(o, len(fl)):
1726 1719 n = fl.node(new)
1727 1720 if n in needs:
1728 1721 needs.remove(n)
1729 1722 if not needs:
1730 1723 del needfiles[f]
1731 1724 self.ui.progress(_('files'), None)
1732 1725
1733 1726 for f, needs in needfiles.iteritems():
1734 1727 fl = self.file(f)
1735 1728 for n in needs:
1736 1729 try:
1737 1730 fl.rev(n)
1738 1731 except error.LookupError:
1739 1732 raise util.Abort(
1740 1733 _('missing file data for %s:%s - run hg verify') %
1741 1734 (f, hex(n)))
1742 1735
1743 1736 newheads = len(cl.heads())
1744 1737 heads = ""
1745 1738 if oldheads and newheads != oldheads:
1746 1739 heads = _(" (%+d heads)") % (newheads - oldheads)
1747 1740
1748 1741 self.ui.status(_("added %d changesets"
1749 1742 " with %d changes to %d files%s\n")
1750 1743 % (changesets, revisions, files, heads))
1751 1744
1752 1745 if changesets > 0:
1753 1746 p = lambda: cl.writepending() and self.root or ""
1754 1747 self.hook('pretxnchangegroup', throw=True,
1755 1748 node=hex(cl.node(clstart)), source=srctype,
1756 1749 url=url, pending=p)
1757 1750
1758 1751 # make changelog see real files again
1759 1752 cl.finalize(trp)
1760 1753
1761 1754 tr.close()
1762 1755 finally:
1763 1756 tr.release()
1764 1757 if lock:
1765 1758 lock.release()
1766 1759
1767 1760 if changesets > 0:
1768 1761 # forcefully update the on-disk branch cache
1769 1762 self.ui.debug("updating the branch cache\n")
1770 1763 self.updatebranchcache()
1771 1764 self.hook("changegroup", node=hex(cl.node(clstart)),
1772 1765 source=srctype, url=url)
1773 1766
1774 1767 for i in xrange(clstart, clend):
1775 1768 self.hook("incoming", node=hex(cl.node(i)),
1776 1769 source=srctype, url=url)
1777 1770
1778 1771 # never return 0 here:
1779 1772 if newheads < oldheads:
1780 1773 return newheads - oldheads - 1
1781 1774 else:
1782 1775 return newheads - oldheads + 1
1783 1776
1784 1777
1785 1778 def stream_in(self, remote):
1786 1779 fp = remote.stream_out()
1787 1780 l = fp.readline()
1788 1781 try:
1789 1782 resp = int(l)
1790 1783 except ValueError:
1791 1784 raise error.ResponseError(
1792 1785 _('Unexpected response from remote server:'), l)
1793 1786 if resp == 1:
1794 1787 raise util.Abort(_('operation forbidden by server'))
1795 1788 elif resp == 2:
1796 1789 raise util.Abort(_('locking the remote repository failed'))
1797 1790 elif resp != 0:
1798 1791 raise util.Abort(_('the server sent an unknown error code'))
1799 1792 self.ui.status(_('streaming all changes\n'))
1800 1793 l = fp.readline()
1801 1794 try:
1802 1795 total_files, total_bytes = map(int, l.split(' ', 1))
1803 1796 except (ValueError, TypeError):
1804 1797 raise error.ResponseError(
1805 1798 _('Unexpected response from remote server:'), l)
1806 1799 self.ui.status(_('%d files to transfer, %s of data\n') %
1807 1800 (total_files, util.bytecount(total_bytes)))
1808 1801 start = time.time()
1809 1802 for i in xrange(total_files):
1810 1803 # XXX doesn't support '\n' or '\r' in filenames
1811 1804 l = fp.readline()
1812 1805 try:
1813 1806 name, size = l.split('\0', 1)
1814 1807 size = int(size)
1815 1808 except (ValueError, TypeError):
1816 1809 raise error.ResponseError(
1817 1810 _('Unexpected response from remote server:'), l)
1818 1811 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1819 1812 # for backwards compat, name was partially encoded
1820 1813 ofp = self.sopener(store.decodedir(name), 'w')
1821 1814 for chunk in util.filechunkiter(fp, limit=size):
1822 1815 ofp.write(chunk)
1823 1816 ofp.close()
1824 1817 elapsed = time.time() - start
1825 1818 if elapsed <= 0:
1826 1819 elapsed = 0.001
1827 1820 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1828 1821 (util.bytecount(total_bytes), elapsed,
1829 1822 util.bytecount(total_bytes / elapsed)))
1830 1823 self.invalidate()
1831 1824 return len(self.heads()) + 1
1832 1825
1833 1826 def clone(self, remote, heads=[], stream=False):
1834 1827 '''clone remote repository.
1835 1828
1836 1829 keyword arguments:
1837 1830 heads: list of revs to clone (forces use of pull)
1838 1831 stream: use streaming clone if possible'''
1839 1832
1840 1833 # now, all clients that can request uncompressed clones can
1841 1834 # read repo formats supported by all servers that can serve
1842 1835 # them.
1843 1836
1844 1837 # if revlog format changes, client will have to check version
1845 1838 # and format flags on "stream" capability, and use
1846 1839 # uncompressed only if compatible.
1847 1840
1848 1841 if stream and not heads and remote.capable('stream'):
1849 1842 return self.stream_in(remote)
1850 1843 return self.pull(remote, heads)
1851 1844
1852 1845 def pushkey(self, namespace, key, old, new):
1853 1846 return pushkey.push(self, namespace, key, old, new)
1854 1847
1855 1848 def listkeys(self, namespace):
1856 1849 return pushkey.list(self, namespace)
1857 1850
1858 1851 # used to avoid circular references so destructors work
1859 1852 def aftertrans(files):
1860 1853 renamefiles = [tuple(t) for t in files]
1861 1854 def a():
1862 1855 for src, dest in renamefiles:
1863 1856 util.rename(src, dest)
1864 1857 return a
1865 1858
1866 1859 def instance(ui, path, create):
1867 1860 return localrepository(ui, util.drop_scheme('file', path), create)
1868 1861
1869 1862 def islocal(path):
1870 1863 return True
@@ -1,482 +1,492 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
9 9 from i18n import _
10 10 import config, util, node, error, cmdutil
11 11 hg = None
12 12
13 13 nullstate = ('', '', 'empty')
14 14
15 15 def state(ctx, ui):
16 16 """return a state dict, mapping subrepo paths configured in .hgsub
17 17 to tuple: (source from .hgsub, revision from .hgsubstate, kind
18 18 (key in types dict))
19 19 """
20 20 p = config.config()
21 21 def read(f, sections=None, remap=None):
22 22 if f in ctx:
23 23 p.parse(f, ctx[f].data(), sections, remap, read)
24 24 else:
25 25 raise util.Abort(_("subrepo spec file %s not found") % f)
26 26
27 27 if '.hgsub' in ctx:
28 28 read('.hgsub')
29 29
30 30 for path, src in ui.configitems('subpaths'):
31 31 p.set('subpaths', path, src, ui.configsource('subpaths', path))
32 32
33 33 rev = {}
34 34 if '.hgsubstate' in ctx:
35 35 try:
36 36 for l in ctx['.hgsubstate'].data().splitlines():
37 37 revision, path = l.split(" ", 1)
38 38 rev[path] = revision
39 39 except IOError, err:
40 40 if err.errno != errno.ENOENT:
41 41 raise
42 42
43 43 state = {}
44 44 for path, src in p[''].items():
45 45 kind = 'hg'
46 46 if src.startswith('['):
47 47 if ']' not in src:
48 48 raise util.Abort(_('missing ] in subrepo source'))
49 49 kind, src = src.split(']', 1)
50 50 kind = kind[1:]
51 51
52 52 for pattern, repl in p.items('subpaths'):
53 53 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
54 54 # does a string decode.
55 55 repl = repl.encode('string-escape')
56 56 # However, we still want to allow back references to go
57 57 # through unharmed, so we turn r'\\1' into r'\1'. Again,
58 58 # extra escapes are needed because re.sub string decodes.
59 59 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
60 60 try:
61 61 src = re.sub(pattern, repl, src, 1)
62 62 except re.error, e:
63 63 raise util.Abort(_("bad subrepository pattern in %s: %s")
64 64 % (p.source('subpaths', pattern), e))
65 65
66 66 state[path] = (src.strip(), rev.get(path, ''), kind)
67 67
68 68 return state
69 69
70 70 def writestate(repo, state):
71 71 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
72 72 repo.wwrite('.hgsubstate',
73 73 ''.join(['%s %s\n' % (state[s][1], s)
74 74 for s in sorted(state)]), '')
75 75
76 76 def submerge(repo, wctx, mctx, actx):
77 77 """delegated from merge.applyupdates: merging of .hgsubstate file
78 78 in working context, merging context and ancestor context"""
79 79 if mctx == actx: # backwards?
80 80 actx = wctx.p1()
81 81 s1 = wctx.substate
82 82 s2 = mctx.substate
83 83 sa = actx.substate
84 84 sm = {}
85 85
86 86 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
87 87
88 88 def debug(s, msg, r=""):
89 89 if r:
90 90 r = "%s:%s:%s" % r
91 91 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
92 92
93 93 for s, l in s1.items():
94 94 a = sa.get(s, nullstate)
95 95 ld = l # local state with possible dirty flag for compares
96 96 if wctx.sub(s).dirty():
97 97 ld = (l[0], l[1] + "+")
98 98 if wctx == actx: # overwrite
99 99 a = ld
100 100
101 101 if s in s2:
102 102 r = s2[s]
103 103 if ld == r or r == a: # no change or local is newer
104 104 sm[s] = l
105 105 continue
106 106 elif ld == a: # other side changed
107 107 debug(s, "other changed, get", r)
108 108 wctx.sub(s).get(r)
109 109 sm[s] = r
110 110 elif ld[0] != r[0]: # sources differ
111 111 if repo.ui.promptchoice(
112 112 _(' subrepository sources for %s differ\n'
113 113 'use (l)ocal source (%s) or (r)emote source (%s)?')
114 114 % (s, l[0], r[0]),
115 115 (_('&Local'), _('&Remote')), 0):
116 116 debug(s, "prompt changed, get", r)
117 117 wctx.sub(s).get(r)
118 118 sm[s] = r
119 119 elif ld[1] == a[1]: # local side is unchanged
120 120 debug(s, "other side changed, get", r)
121 121 wctx.sub(s).get(r)
122 122 sm[s] = r
123 123 else:
124 124 debug(s, "both sides changed, merge with", r)
125 125 wctx.sub(s).merge(r)
126 126 sm[s] = l
127 127 elif ld == a: # remote removed, local unchanged
128 128 debug(s, "remote removed, remove")
129 129 wctx.sub(s).remove()
130 130 else:
131 131 if repo.ui.promptchoice(
132 132 _(' local changed subrepository %s which remote removed\n'
133 133 'use (c)hanged version or (d)elete?') % s,
134 134 (_('&Changed'), _('&Delete')), 0):
135 135 debug(s, "prompt remove")
136 136 wctx.sub(s).remove()
137 137
138 138 for s, r in s2.items():
139 139 if s in s1:
140 140 continue
141 141 elif s not in sa:
142 142 debug(s, "remote added, get", r)
143 143 mctx.sub(s).get(r)
144 144 sm[s] = r
145 145 elif r != sa[s]:
146 146 if repo.ui.promptchoice(
147 147 _(' remote changed subrepository %s which local removed\n'
148 148 'use (c)hanged version or (d)elete?') % s,
149 149 (_('&Changed'), _('&Delete')), 0) == 0:
150 150 debug(s, "prompt recreate", r)
151 151 wctx.sub(s).get(r)
152 152 sm[s] = r
153 153
154 154 # record merged .hgsubstate
155 155 writestate(repo, sm)
156 156
157 157 def relpath(sub):
158 158 """return path to this subrepo as seen from outermost repo"""
159 159 if not hasattr(sub, '_repo'):
160 160 return sub._path
161 161 parent = sub._repo
162 162 while hasattr(parent, '_subparent'):
163 163 parent = parent._subparent
164 164 return sub._repo.root[len(parent.root)+1:]
165 165
166 166 def _abssource(repo, push=False):
167 167 """return pull/push path of repo - either based on parent repo
168 168 .hgsub info or on the subrepos own config"""
169 169 if hasattr(repo, '_subparent'):
170 170 source = repo._subsource
171 171 if source.startswith('/') or '://' in source:
172 172 return source
173 173 parent = _abssource(repo._subparent, push)
174 174 if '://' in parent:
175 175 if parent[-1] == '/':
176 176 parent = parent[:-1]
177 177 r = urlparse.urlparse(parent + '/' + source)
178 178 r = urlparse.urlunparse((r[0], r[1],
179 179 posixpath.normpath(r[2]),
180 180 r[3], r[4], r[5]))
181 181 return r
182 182 return posixpath.normpath(os.path.join(parent, repo._subsource))
183 183 if push and repo.ui.config('paths', 'default-push'):
184 184 return repo.ui.config('paths', 'default-push', repo.root)
185 185 return repo.ui.config('paths', 'default', repo.root)
186 186
187 def itersubrepos(ctx1, ctx2):
188 """find subrepos in ctx1 or ctx2"""
189 # Create a (subpath, ctx) mapping where we prefer subpaths from
190 # ctx1. The subpaths from ctx2 are important when the .hgsub file
191 # has been modified (in ctx2) but not yet committed (in ctx1).
192 subpaths = dict.fromkeys(ctx2.substate, ctx2)
193 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
194 for subpath, ctx in sorted(subpaths.iteritems()):
195 yield subpath, ctx.sub(subpath)
196
187 197 def subrepo(ctx, path):
188 198 """return instance of the right subrepo class for subrepo in path"""
189 199 # subrepo inherently violates our import layering rules
190 200 # because it wants to make repo objects from deep inside the stack
191 201 # so we manually delay the circular imports to not break
192 202 # scripts that don't use our demand-loading
193 203 global hg
194 204 import hg as h
195 205 hg = h
196 206
197 207 util.path_auditor(ctx._repo.root)(path)
198 208 state = ctx.substate.get(path, nullstate)
199 209 if state[2] not in types:
200 210 raise util.Abort(_('unknown subrepo type %s') % state[2])
201 211 return types[state[2]](ctx, path, state[:2])
202 212
203 213 # subrepo classes need to implement the following abstract class:
204 214
205 215 class abstractsubrepo(object):
206 216
207 217 def dirty(self):
208 218 """returns true if the dirstate of the subrepo does not match
209 219 current stored state
210 220 """
211 221 raise NotImplementedError
212 222
213 223 def checknested(path):
214 224 """check if path is a subrepository within this repository"""
215 225 return False
216 226
217 227 def commit(self, text, user, date):
218 228 """commit the current changes to the subrepo with the given
219 229 log message. Use given user and date if possible. Return the
220 230 new state of the subrepo.
221 231 """
222 232 raise NotImplementedError
223 233
224 234 def remove(self):
225 235 """remove the subrepo
226 236
227 237 (should verify the dirstate is not dirty first)
228 238 """
229 239 raise NotImplementedError
230 240
231 241 def get(self, state):
232 242 """run whatever commands are needed to put the subrepo into
233 243 this state
234 244 """
235 245 raise NotImplementedError
236 246
237 247 def merge(self, state):
238 248 """merge currently-saved state with the new state."""
239 249 raise NotImplementedError
240 250
241 251 def push(self, force):
242 252 """perform whatever action is analogous to 'hg push'
243 253
244 254 This may be a no-op on some systems.
245 255 """
246 256 raise NotImplementedError
247 257
248 258
249 259 def status(self, rev2, **opts):
250 260 return [], [], [], [], [], [], []
251 261
252 262 def diff(self, diffopts, node2, match, prefix, **opts):
253 263 pass
254 264
255 265 class hgsubrepo(abstractsubrepo):
256 266 def __init__(self, ctx, path, state):
257 267 self._path = path
258 268 self._state = state
259 269 r = ctx._repo
260 270 root = r.wjoin(path)
261 271 create = False
262 272 if not os.path.exists(os.path.join(root, '.hg')):
263 273 create = True
264 274 util.makedirs(root)
265 275 self._repo = hg.repository(r.ui, root, create=create)
266 276 self._repo._subparent = r
267 277 self._repo._subsource = state[0]
268 278
269 279 if create:
270 280 fp = self._repo.opener("hgrc", "w", text=True)
271 281 fp.write('[paths]\n')
272 282
273 283 def addpathconfig(key, value):
274 284 fp.write('%s = %s\n' % (key, value))
275 285 self._repo.ui.setconfig('paths', key, value)
276 286
277 287 defpath = _abssource(self._repo)
278 288 defpushpath = _abssource(self._repo, True)
279 289 addpathconfig('default', defpath)
280 290 if defpath != defpushpath:
281 291 addpathconfig('default-push', defpushpath)
282 292 fp.close()
283 293
284 294 def status(self, rev2, **opts):
285 295 try:
286 296 rev1 = self._state[1]
287 297 ctx1 = self._repo[rev1]
288 298 ctx2 = self._repo[rev2]
289 299 return self._repo.status(ctx1, ctx2, **opts)
290 300 except error.RepoLookupError, inst:
291 301 self._repo.ui.warn(_("warning: %s in %s\n")
292 302 % (inst, relpath(self)))
293 303 return [], [], [], [], [], [], []
294 304
295 305 def diff(self, diffopts, node2, match, prefix, **opts):
296 306 try:
297 307 node1 = node.bin(self._state[1])
298 308 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
299 309 node1, node2, match,
300 310 prefix=os.path.join(prefix, self._path),
301 311 listsubrepos=True, **opts)
302 312 except error.RepoLookupError, inst:
303 313 self._repo.ui.warn(_("warning: %s in %s\n")
304 314 % (inst, relpath(self)))
305 315
306 316 def dirty(self):
307 317 r = self._state[1]
308 318 if r == '':
309 319 return True
310 320 w = self._repo[None]
311 321 if w.p1() != self._repo[r]: # version checked out change
312 322 return True
313 323 return w.dirty() # working directory changed
314 324
315 325 def checknested(self, path):
316 326 return self._repo._checknested(self._repo.wjoin(path))
317 327
318 328 def commit(self, text, user, date):
319 329 self._repo.ui.debug("committing subrepo %s\n" % relpath(self))
320 330 n = self._repo.commit(text, user, date)
321 331 if not n:
322 332 return self._repo['.'].hex() # different version checked out
323 333 return node.hex(n)
324 334
325 335 def remove(self):
326 336 # we can't fully delete the repository as it may contain
327 337 # local-only history
328 338 self._repo.ui.note(_('removing subrepo %s\n') % relpath(self))
329 339 hg.clean(self._repo, node.nullid, False)
330 340
331 341 def _get(self, state):
332 342 source, revision, kind = state
333 343 try:
334 344 self._repo.lookup(revision)
335 345 except error.RepoError:
336 346 self._repo._subsource = source
337 347 srcurl = _abssource(self._repo)
338 348 self._repo.ui.status(_('pulling subrepo %s from %s\n')
339 349 % (relpath(self), srcurl))
340 350 other = hg.repository(self._repo.ui, srcurl)
341 351 self._repo.pull(other)
342 352
343 353 def get(self, state):
344 354 self._get(state)
345 355 source, revision, kind = state
346 356 self._repo.ui.debug("getting subrepo %s\n" % self._path)
347 357 hg.clean(self._repo, revision, False)
348 358
349 359 def merge(self, state):
350 360 self._get(state)
351 361 cur = self._repo['.']
352 362 dst = self._repo[state[1]]
353 363 anc = dst.ancestor(cur)
354 364 if anc == cur:
355 365 self._repo.ui.debug("updating subrepo %s\n" % relpath(self))
356 366 hg.update(self._repo, state[1])
357 367 elif anc == dst:
358 368 self._repo.ui.debug("skipping subrepo %s\n" % relpath(self))
359 369 else:
360 370 self._repo.ui.debug("merging subrepo %s\n" % relpath(self))
361 371 hg.merge(self._repo, state[1], remind=False)
362 372
363 373 def push(self, force):
364 374 # push subrepos depth-first for coherent ordering
365 375 c = self._repo['']
366 376 subs = c.substate # only repos that are committed
367 377 for s in sorted(subs):
368 378 if not c.sub(s).push(force):
369 379 return False
370 380
371 381 dsturl = _abssource(self._repo, True)
372 382 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
373 383 (relpath(self), dsturl))
374 384 other = hg.repository(self._repo.ui, dsturl)
375 385 return self._repo.push(other, force)
376 386
class svnsubrepo(abstractsubrepo):
    """Subversion-backed subrepository.

    State is a (source URL, revision) pair; every operation shells out
    to the ``svn`` command line client.
    """

    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui

    def _svncommand(self, commands, filename=''):
        """Run ``svn <commands>`` on this subrepo's path and return stdout.

        Raises util.Abort with svn's stderr if the command wrote any.
        """
        path = os.path.join(self._ctx._repo.origroot, self._path, filename)
        cmd = ['svn'] + commands + [path]
        cmd = [util.shellquote(arg) for arg in cmd]
        cmd = util.quotecommand(' '.join(cmd))
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        env['LC_MESSAGES'] = 'C'
        write, read, err = util.popen3(cmd, env=env, newlines=True)
        retdata = read.read()
        err = err.read().strip()
        if err:
            raise util.Abort(err)
        return retdata

    def _wcrev(self):
        """Return the working copy revision as an int (0 if unknown)."""
        output = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        if not entries:
            return 0
        return int(entries[0].getAttribute('revision') or 0)

    def _wcchanged(self):
        """Return (changes, extchanges) where changes is True
        if the working directory was changed, and extchanges is
        True if any of these changes concern an external entry.
        """
        output = self._svncommand(['status', '--xml'])
        externals, changes = [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none')):
                changes.append(path)
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True
        return bool(changes), False

    def dirty(self):
        """Return True if the working copy differs from the recorded state."""
        # Compare revisions as strings: _wcrev() returns an int while
        # the recorded state revision is a string (merge() below does
        # int(self._state[1])); an int/str comparison is never equal in
        # Python 2, which made dirty() always report dirty.
        if (str(self._wcrev()) == self._state[1]
            and not self._wcchanged()[0]):
            return False
        return True

    def commit(self, text, user, date):
        """Commit local changes with ``svn commit`` and return the new rev.

        Raises util.Abort when only externals changed or the commit fails.
        """
        # user and date are out of our hands since svn is centralized
        changed, extchanged = self._wcchanged()
        if not changed:
            return self._wcrev()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        commitinfo = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        # sync the working copy to the revision we just created
        self._ui.status(self._svncommand(['update', '-r', newrev]))
        return newrev

    def remove(self):
        """Delete the subrepo's working copy unless it has local changes."""
        if self.dirty():
            # Apply % outside _() so the untranslated message is what
            # gets looked up in the catalog (the original formatted the
            # string before the translation lookup).
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)
        # join() lives on the repo object, not the context: _ctx has no
        # public "repo" attribute (cf. self._ctx._repo in _svncommand)
        shutil.rmtree(self._ctx._repo.join(self._path))

    def get(self, state):
        """Check out state = (URL, revision) into the subrepo path."""
        status = self._svncommand(['checkout', state[0], '--revision', state[1]])
        if not re.search('Checked out revision [0-9]+.', status):
            raise util.Abort(status.splitlines()[-1])
        self._ui.status(status)

    def merge(self, state):
        # svn history is linear; "merge" just means update forward
        old = int(self._state[1])
        new = int(state[1])
        if new > old:
            self.get(state)

    def push(self, force):
        # push is a no-op for SVN
        return True
478 488
# Map of subrepo kind (the scheme recorded in .hgsubstate/.hgsub)
# to the abstractsubrepo subclass implementing it.
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    }
General Comments 0
You need to be logged in to leave comments. Login now