##// END OF EJS Templates
linkrev: take a revision number rather than a hash
Matt Mackall -
r7361:9fe97eea default
parent child Browse files
Show More
@@ -1,25 +1,25 b''
#!/usr/bin/env python
# Dump revlogs as raw data stream
# $ find .hg/store/ -name "*.i" | xargs dumprevlog > repo.dump

import sys
from mercurial import revlog, node, util

# Put the standard streams into binary mode so revision payloads are not
# mangled by newline translation (matters on Windows).
for fp in (sys.stdin, sys.stdout, sys.stderr):
    util.set_binary(fp)

for f in sys.argv[1:]:
    # revlog expects an opener callable rather than a bare path
    binopen = lambda fn: open(fn, 'rb')
    r = revlog.revlog(binopen, f)
    print "file:", f
    for i in r:
        n = r.node(i)
        p = r.parents(n)
        d = r.revision(n)
        print "node:", node.hex(n)
        # linkrev() takes a revision number (i), not a node hash
        print "linkrev:", r.linkrev(i)
        print "parents:", node.hex(p[0]), node.hex(p[1])
        print "length:", len(d)
        print "-start-"
        print d
        print "-end-"
@@ -1,1193 +1,1193 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, bisect, stat
11 11 import mdiff, bdiff, util, templater, templatefilters, patch, errno
12 12 import match as _match
13 13
revrangesep = ':'  # separator between start and end in "START:END" revision range specs
15 15
class UnknownCommand(Exception):
    """Exception raised if command is not in the command table.

    Raised with the attempted command name as its only argument.
    """
class AmbiguousCommand(Exception):
    """Exception raised if command shortcut matches more than one command.

    Raised with (attempted command name, sorted list of matching commands).
    """
20 20
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}
    for key in table.keys():
        # table keys look like "^status|st": strip the priority marker and
        # split into the list of aliases
        aliases = key.lstrip("^").split("|")
        match = None
        if cmd in aliases:
            match = cmd
        elif not strict:
            # fall back to unambiguous-prefix matching, first alias wins
            prefixed = [a for a in aliases if a.startswith(cmd)]
            if prefixed:
                match = prefixed[0]
        if match is None:
            continue
        isdebug = aliases[0].startswith("debug") or match.startswith("debug")
        bucket = isdebug and debugchoice or choice
        bucket[match] = (aliases, table[key])

    # debug commands are only offered when nothing else matched
    if debugchoice and not choice:
        return debugchoice
    return choice
49 49
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice = findpossible(cmd, table, strict)

    # an exact match always wins over prefix matches
    if cmd in choice:
        return choice[cmd]

    # several distinct commands share this prefix: refuse to guess
    if len(choice) > 1:
        raise AmbiguousCommand(cmd, sorted(choice.keys()))

    # exactly one prefix match left
    if choice:
        return list(choice.values())[0]

    raise UnknownCommand(cmd)
66 66
def bail_if_changed(repo):
    """Abort if the working dir has an uncommitted merge or local changes."""
    # a non-null second parent means a merge has not been committed yet
    if repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # any modified/added/removed/deleted file means pending changes
    st = repo.status()[:4]
    if st[0] or st[1] or st[2] or st[3]:
        raise util.Abort(_("outstanding uncommitted changes"))
73 73
74 74 def logmessage(opts):
75 75 """ get the log message according to -m and -l option """
76 76 message = opts['message']
77 77 logfile = opts['logfile']
78 78
79 79 if message and logfile:
80 80 raise util.Abort(_('options --message and --logfile are mutually '
81 81 'exclusive'))
82 82 if not message and logfile:
83 83 try:
84 84 if logfile == '-':
85 85 message = sys.stdin.read()
86 86 else:
87 87 message = open(logfile).read()
88 88 except IOError, inst:
89 89 raise util.Abort(_("can't read commit message '%s': %s") %
90 90 (logfile, inst.strerror))
91 91 return message
92 92
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # no limit requested: effectively unbounded
        return sys.maxint
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
105 105
def setremoteconfig(ui, opts):
    "copy remote options to ui tree"
    # mirror each remote-related command line option into the ui config
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key)
        if value:
            ui.setconfig("ui", key, value)
112 112
def revpair(repo, revs):
    '''return pair of nodes, given list of revisions. second item can
    be None, meaning use working dir.'''

    def tonode(val, defval):
        # an empty spec falls back to the default bound when one is given
        if not val and val != 0 and defval is not None:
            val = defval
        return repo.lookup(val)

    if not revs:
        # no revisions: first working dir parent, open-ended
        return repo.dirstate.parents()[0], None

    if len(revs) > 2:
        raise util.Abort(_('too many revisions specified'))

    if len(revs) == 2:
        # two explicit revs may not themselves be ranges
        if revrangesep in revs[0] or revrangesep in revs[1]:
            raise util.Abort(_('too many revisions specified'))
        return tonode(revs[0], None), tonode(revs[1], None)

    spec = revs[0]
    if revrangesep not in spec:
        return tonode(spec, None), None
    first, second = spec.split(revrangesep, 1)
    # empty bounds default to revision 0 and tip respectively
    return tonode(first, 0), tonode(second, len(repo) - 1)
140 140
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def torev(val, defval):
        # an empty spec falls back to the default bound when one is given
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen = {}
    result = []

    def add(rev):
        # keep first-seen order, drop duplicates
        if rev not in seen:
            seen[rev] = 1
            result.append(rev)

    for spec in revs:
        if revrangesep not in spec:
            add(torev(spec, None))
            continue
        first, second = spec.split(revrangesep, 1)
        first = torev(first, 0)
        second = torev(second, len(repo) - 1)
        # ranges may run backwards ("5:2")
        step = first > second and -1 or 1
        for rev in xrange(first, second + step, step):
            add(rev)

    return result
169 169
def make_filename(repo, pat, node,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand a filename pattern containing %-format specifiers.

    Supported specifiers (each only when its input is available):
    %H/%h/%R - full hex, short hex, and numeric revision of node
    %r - zero-padded revision number (padded to revwidth)
    %N/%n - total count and (padded) sequence number
    %s/%d/%p - basename, dirname, and full pathname
    %b - basename of the repository root; %% - literal '%'
    Raises util.Abort on an unknown specifier.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        # when both are known, pad the sequence number to the total's width
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        # scan the pattern, replacing each %X pair via the expander table
        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        # an unregistered specifier indexes expander with a missing key
        raise util.Abort(_("invalid format spec '%%%s' in output file name") %
                         inst.args[0])
214 214
def make_file(repo, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return an open file-like object for the given pattern.

    '-' or an empty pattern maps to stdout/stdin depending on mode; an
    already-open file-like object is passed through; anything else is
    treated as a make_filename() pattern and opened with the given mode.
    """
    writable = 'w' in mode or 'a' in mode

    if not pat or pat == '-':
        if writable:
            return sys.stdout
        return sys.stdin
    # pass through objects that already behave like files
    if writable and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    fname = make_filename(repo, pat, node, total, seqno, revwidth, pathname)
    return open(fname, mode)
229 229
def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a matcher for the given patterns with include/exclude options."""
    # expand shell globs unless the caller already did (or asked for raw pats)
    if default == 'relpath' and not globbed:
        pats = util.expand_glob(pats or [])
    matcher = _match.match(repo.root, repo.getcwd(), pats,
                           opts.get('include'), opts.get('exclude'), default)
    def warnbad(f, msg):
        # report the problem relative to cwd and drop the file
        repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))
        return False
    matcher.bad = warnbad
    return matcher
240 240
def matchall(repo):
    """Return a matcher that matches every file in the repository."""
    return _match.always(repo.root, repo.getcwd())
243 243
def matchfiles(repo, files):
    """Return a matcher that matches exactly the given list of files."""
    return _match.exact(repo.root, repo.getcwd(), files)
246 246
def findrenames(repo, added=None, removed=None, threshold=0.5):
    '''find renamed files -- yields (before, after, score) tuples'''
    if added is None or removed is None:
        added, removed = repo.status()[1:3]
    ctx = repo['.']
    for new in added:
        newdata = repo.wread(new)
        bestname, bestscore = None, threshold
        for old in removed:
            olddata = ctx.filectx(old).data()

            # bdiff.blocks() returns ranges of matching lines; weigh each
            # matched range by its size in bytes
            newlines = mdiff.splitnewlines(newdata)
            equal = 0
            for x1, x2, y1, y2 in bdiff.blocks(newdata, olddata):
                equal += sum([len(line) for line in newlines[x1:x2]])

            combined = len(newdata) + len(olddata)
            if combined:
                # fraction of bytes the two files have in common
                score = equal * 2.0 / combined
                if score >= bestscore:
                    bestname, bestscore = old, score
        if bestname:
            yield bestname, new, bestscore
274 274
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    """Add new files and remove missing ones, optionally recording renames.

    Walks the working directory with the given patterns, schedules unknown
    files for addition and missing tracked files for removal, and -- when
    similarity > 0 -- records similar enough add/remove pairs as copies via
    findrenames() so they show up as renames.
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    add, remove = [], []
    # abs path -> (relative path, exact match?) remembered for rename messages
    mapping = {}
    # refuse to operate on paths that escape the repository root
    audit_path = util.path_auditor(repo.root)
    m = match(repo, pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except:
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        # unknown on-disk file -> schedule for add
        if good and abs not in repo.dirstate:
            add.append(abs)
            mapping[abs] = rel, m.exact(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        # tracked but missing (or audit-failed, or replaced by a real
        # directory) -> schedule for removal
        if repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            remove.append(abs)
            mapping[abs] = rel, exact
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
    if not dry_run:
        repo.remove(remove)
        repo.add(add)
    if similarity > 0:
        for old, new, score in findrenames(repo, add, remove, similarity):
            oldrel, oldexact = mapping[old]
            newrel, newexact = mapping[new]
            if repo.ui.verbose or not oldexact or not newexact:
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (oldrel, newrel, score * 100))
            if not dry_run:
                repo.copy(old, new)
317 317
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or move, when rename=True) files matching pats to a destination.

    Returns the number of failed copies; callers treat non-zero as an error.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    # abstarget -> abssrc, used to detect two sources copying onto one target
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")

    # collect the (abs, rel, exact) source files matching one pattern
    def walkpat(pat):
        srcs = []
        m = match(repo, [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            # skip untracked ('?') and to-be-removed ('r') files, warning
            # only when the user named them explicitly
            if state in '?r':
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # copy one file and record the copy in the dirstate;
    # returns True on failure
    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = util.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.exists(target)
        if (not after and exists or after and state in 'mn'):
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records copies whose target already exists
            if not exists:
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            action = rename and "moving" or "copying"
            ui.status(_('%s %s to %s\n') % (action, relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        # follow the copy chain back to the original source, if any
        origsrc = repo.dirstate.copied(abssrc) or abssrc
        if abstarget == origsrc: # copying back a copy?
            if state not in 'mn' and not dryrun:
                repo.dirstate.normallookup(abstarget)
        else:
            if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
                if not ui.quiet:
                    ui.warn(_("%s has not been committed yet, so no copy "
                              "data will be stored for %s.\n")
                            % (repo.pathto(origsrc, cwd), reltarget))
                if repo.dirstate[abstarget] in '?r' and not dryrun:
                    repo.add([abstarget])
            elif not dryrun:
                repo.copy(origsrc, abstarget)

        if rename and not dryrun:
            repo.remove([abssrc], not after)

    # build a target-path function for ordinary (non --after) operation
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = util.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # build a target-path function for --after, where sources may be gone
    # and the layout must be inferred from what exists at the destination
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if util.patkind(pat, None)[0]:
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = util.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many candidate targets exist for this strip
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.exists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    # pick whichever strip length matches more real files
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = util.expand_glob(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or util.patkind(pats[0], None)[0]:
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors
520 520
def service(opts, parentfn=None, initfn=None, runfn=None):
    '''Run a command as a service.

    With --daemon the process re-spawns itself detached and the parent
    waits on a pipe until the child signals readiness; initfn is then
    called, a pid file is written if requested, stdio is redirected to
    the null device in the daemonized child, and finally runfn runs.
    '''

    # parent side of --daemon: spawn a detached child and wait for its
    # readiness byte on the pipe before returning/exiting
    if opts['daemon'] and not opts['daemon_pipefds']:
        rfd, wfd = os.pipe()
        args = sys.argv[:]
        args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
        # Don't pass --cwd to the child process, because we've already
        # changed directory.
        for i in xrange(1,len(args)):
            if args[i].startswith('--cwd='):
                del args[i]
                break
            elif args[i].startswith('--cwd'):
                del args[i:i+2]
                break
        pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                         args[0], args)
        os.close(wfd)
        # blocks until the child writes its readiness byte
        os.read(rfd, 1)
        if parentfn:
            return parentfn(pid)
        else:
            os._exit(0)

    if initfn:
        initfn()

    if opts['pid_file']:
        fp = open(opts['pid_file'], 'w')
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    # child side of --daemon: detach from the session, tell the parent
    # we are ready, and point stdio at the null device
    if opts['daemon_pipefds']:
        rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
        os.close(rfd)
        try:
            os.setsid()
        except AttributeError:
            # platforms without setsid (e.g. Windows)
            pass
        os.write(wfd, 'y')
        os.close(wfd)
        sys.stdout.flush()
        sys.stderr.flush()
        fd = os.open(util.nulldev, os.O_RDWR)
        if fd != 0: os.dup2(fd, 0)
        if fd != 1: os.dup2(fd, 1)
        if fd != 2: os.dup2(fd, 2)
        if fd not in (0, 1, 2): os.close(fd)

    if runfn:
        return runfn()
573 573
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, buffered):
        """patch is a matcher (or False) selecting files to diff; when
        buffered, output is accumulated per rev and emitted by flush()."""
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.header = {}      # rev -> buffered header text
        self.hunk = {}        # rev -> buffered changeset text
        self.lastheader = None

    def flush(self, rev):
        """Write buffered output for rev; return 1 if a hunk was written."""
        if rev in self.header:
            h = self.header[rev]
            # only repeat the header when it differs from the previous one
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def show(self, rev=0, changenode=None, copies=(), **props):
        """Show one changeset, buffering the output when requested."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(rev, changenode, copies, props)
            self.hunk[rev] = self.ui.popbuffer()
        else:
            self._show(rev, changenode, copies, props)

    def _show(self, rev, changenode, copies, props):
        '''show a single changeset or file revision'''
        log = self.repo.changelog
        # fill in whichever of rev/changenode was not supplied
        if changenode is None:
            changenode = log.node(rev)
        elif not rev:
            rev = log.rev(changenode)

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        changes = log.read(changenode)
        date = util.datestr(changes[2])
        extra = changes[5]
        branch = extra.get("branch")

        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))

        # don't show the default branch name
        if branch != 'default':
            branch = util.tolocal(branch)
            self.ui.write(_("branch: %s\n") % branch)
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent)

        if self.ui.debugflag:
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(changes[0]), hex(changes[0])))
        self.ui.write(_("user: %s\n") % changes[1])
        self.ui.write(_("date: %s\n") % date)

        if self.ui.debugflag:
            # split file list into modified/added/removed against parent 1
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
        elif changes[3] and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(changes[3]))
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies))

        if extra and self.ui.debugflag:
            for key, value in util.sort(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')))

        description = changes[4].strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"))
                self.ui.write(description)
                self.ui.write("\n\n")
            else:
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0])
        self.ui.write("\n")

        self.showpatch(changenode)

    def showpatch(self, node):
        """Write the diff against node's first parent, if patches requested."""
        if self.patch:
            prev = self.repo.changelog.parents(node)[0]
            chunks = patch.diff(self.repo, prev, node, match=self.patch,
                                opts=patch.diffopts(self.ui))
            for chunk in chunks:
                self.ui.write(chunk)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
699 699
700 700
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, buffered)
        filters = templatefilters.filters.copy()
        # full node hashes in debug mode, 12-char short form otherwise
        filters['formatnode'] = (ui.debugflag and (lambda x: x)
                                 or (lambda x: x[:12]))
        self.t = templater.templater(mapfile, filters,
                                     cache={
                                         'parent': '{rev}:{node|formatnode} ',
                                         'manifest': '{rev}:{node|formatnode}',
                                         'filecopy': '{name} ({source})'})

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _show(self, rev, changenode, copies, props):
        '''show a single changeset or file revision'''
        log = self.repo.changelog
        # fill in whichever of rev/changenode was not supplied
        if changenode is None:
            changenode = log.node(rev)
        elif not rev:
            rev = log.rev(changenode)

        changes = log.read(changenode)

        def showlist(name, values, plural=None, **args):
            '''expand set of values.
            name is name of key in template map.
            values is list of strings or dicts.
            plural is plural of name, if not simply name + 's'.

            expansion works like this, given name 'foo'.

            if values is empty, expand 'no_foos'.

            if 'foo' not in template map, return values as a string,
            joined by space.

            expand 'start_foos'.

            for each value, expand 'foo'. if 'last_foo' in template
            map, expand it instead of 'foo' for last key.

            expand 'end_foos'.
            '''
            if plural: names = plural
            else: names = name + 's'
            if not values:
                noname = 'no_' + names
                if noname in self.t:
                    yield self.t(noname, **args)
                return
            if name not in self.t:
                if isinstance(values[0], str):
                    yield ' '.join(values)
                else:
                    for v in values:
                        yield dict(v, **args)
                return
            startname = 'start_' + names
            if startname in self.t:
                yield self.t(startname, **args)
            vargs = args.copy()
            def one(v, tag=name):
                # merge v into the template arguments whether it is a
                # dict, a list of pairs, or a plain value
                try:
                    vargs.update(v)
                except (AttributeError, ValueError):
                    try:
                        for a, b in v:
                            vargs[a] = b
                    except ValueError:
                        vargs[name] = v
                return self.t(tag, **vargs)
            lastname = 'last_' + name
            if lastname in self.t:
                last = values.pop()
            else:
                last = None
            for v in values:
                yield one(v)
            if last is not None:
                yield one(last, tag=lastname)
            endname = 'end_' + names
            if endname in self.t:
                yield self.t(endname, **args)

        def showbranches(**args):
            # the default branch name is not shown
            branch = changes[5].get("branch")
            if branch != 'default':
                branch = util.tolocal(branch)
                return showlist('branch', [branch], plural='branches', **args)

        def showparents(**args):
            parents = [[('rev', p), ('node', hex(log.node(p)))]
                       for p in self._meaningful_parentrevs(log, rev)]
            return showlist('parent', parents, **args)

        def showtags(**args):
            return showlist('tag', self.repo.nodetags(changenode), **args)

        def showextras(**args):
            for key, value in util.sort(changes[5].items()):
                args = args.copy()
                args.update(dict(key=key, value=value))
                yield self.t('extra', **args)

        def showcopies(**args):
            c = [{'name': x[0], 'source': x[1]} for x in copies]
            return showlist('file_copy', c, plural='file_copies', **args)

        # the status call is expensive; lazily compute and cache its result
        files = []
        def getfiles():
            if not files:
                files[:] = self.repo.status(
                    log.parents(changenode)[0], changenode)[:3]
            return files
        def showfiles(**args):
            return showlist('file', changes[3], **args)
        def showmods(**args):
            return showlist('file_mod', getfiles()[0], **args)
        def showadds(**args):
            return showlist('file_add', getfiles()[1], **args)
        def showdels(**args):
            return showlist('file_del', getfiles()[2], **args)
        def showmanifest(**args):
            args = args.copy()
            args.update(dict(rev=self.repo.manifest.rev(changes[0]),
                             node=hex(changes[0])))
            return self.t('manifest', **args)

        defprops = {
            'author': changes[1],
            'branches': showbranches,
            'date': changes[2],
            'desc': changes[4].strip(),
            'file_adds': showadds,
            'file_dels': showdels,
            'file_mods': showmods,
            'files': showfiles,
            'file_copies': showcopies,
            'manifest': showmanifest,
            'node': hex(changenode),
            'parents': showparents,
            'rev': rev,
            'tags': showtags,
            'extras': showextras,
            }
        props = props.copy()
        props.update(defprops)

        try:
            # pick the most specific header template available
            if self.ui.debugflag and 'header_debug' in self.t:
                key = 'header_debug'
            elif self.ui.quiet and 'header_quiet' in self.t:
                key = 'header_quiet'
            elif self.ui.verbose and 'header_verbose' in self.t:
                key = 'header_verbose'
            elif 'header' in self.t:
                key = 'header'
            else:
                key = ''
            if key:
                h = templater.stringify(self.t(key, **props))
                if self.buffered:
                    self.header[rev] = h
                else:
                    self.ui.write(h)
            # pick the most specific changeset template available
            if self.ui.debugflag and 'changeset_debug' in self.t:
                key = 'changeset_debug'
            elif self.ui.quiet and 'changeset_quiet' in self.t:
                key = 'changeset_quiet'
            elif self.ui.verbose and 'changeset_verbose' in self.t:
                key = 'changeset_verbose'
            else:
                key = 'changeset'
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(changenode)
        except KeyError, inst:
            raise util.Abort(_("%s: no key named '%s'") % (self.t.mapfile,
                                                           inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
886 886
887 887 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
888 888 """show one changeset using template or regular display.
889 889
890 890 Display format will be the first non-empty hit of:
891 891 1. option 'template'
892 892 2. option 'style'
893 893 3. [ui] setting 'logtemplate'
894 894 4. [ui] setting 'style'
895 895 If all of these values are either the unset or the empty string,
896 896 regular display via changeset_printer() is done.
897 897 """
898 898 # options
899 899 patch = False
900 900 if opts.get('patch'):
901 901 patch = matchfn or matchall(repo)
902 902
903 903 tmpl = opts.get('template')
904 904 mapfile = None
905 905 if tmpl:
906 906 tmpl = templater.parsestring(tmpl, quoted=False)
907 907 else:
908 908 mapfile = opts.get('style')
909 909 # ui settings
910 910 if not mapfile:
911 911 tmpl = ui.config('ui', 'logtemplate')
912 912 if tmpl:
913 913 tmpl = templater.parsestring(tmpl)
914 914 else:
915 915 mapfile = ui.config('ui', 'style')
916 916
917 917 if tmpl or mapfile:
918 918 if mapfile:
919 919 if not os.path.split(mapfile)[0]:
920 920 mapname = (templater.templatepath('map-cmdline.' + mapfile)
921 921 or templater.templatepath(mapfile))
922 922 if mapname: mapfile = mapname
923 923 try:
924 924 t = changeset_templater(ui, repo, patch, mapfile, buffered)
925 925 except SyntaxError, inst:
926 926 raise util.Abort(inst.args[0])
927 927 if tmpl: t.use_template(tmpl)
928 928 return t
929 929 return changeset_printer(ui, repo, patch, buffered)
930 930
931 931 def finddate(ui, repo, date):
932 932 """Find the tipmost changeset that matches the given date spec"""
933 933 df = util.matchdate(date)
934 934 get = util.cachefunc(lambda r: repo[r].changeset())
935 935 changeiter, matchfn = walkchangerevs(ui, repo, [], get, {'rev':None})
936 936 results = {}
937 937 for st, rev, fns in changeiter:
938 938 if st == 'add':
939 939 d = get(rev)[2]
940 940 if df(d[0]):
941 941 results[rev] = d
942 942 elif st == 'iter':
943 943 if rev in results:
944 944 ui.status(_("Found revision %s from %s\n") %
945 945 (rev, util.datestr(results[rev])))
946 946 return str(rev)
947 947
948 948 raise util.Abort(_("revision matching date not found"))
949 949
950 950 def walkchangerevs(ui, repo, pats, change, opts):
951 951 '''Iterate over files and the revs they changed in.
952 952
953 953     Callers most commonly need to iterate backwards over the history
954 954     they are interested in. Doing so has awful (quadratic-looking)
955 955 performance, so we use iterators in a "windowed" way.
956 956
957 957 We walk a window of revisions in the desired order. Within the
958 958 window, we first walk forwards to gather data, then in the desired
959 959 order (usually backwards) to display it.
960 960
961 961 This function returns an (iterator, matchfn) tuple. The iterator
962 962 yields 3-tuples. They will be of one of the following forms:
963 963
964 964 "window", incrementing, lastrev: stepping through a window,
965 965 positive if walking forwards through revs, last rev in the
966 966 sequence iterated over - use to reset state for the current window
967 967
968 968 "add", rev, fns: out-of-order traversal of the given file names
969 969 fns, which changed during revision rev - use to gather data for
970 970 possible display
971 971
972 972 "iter", rev, None: in-order traversal of the revs earlier iterated
973 973 over with "add" - use to display data'''
974 974
975 975 def increasing_windows(start, end, windowsize=8, sizelimit=512):
976 976 if start < end:
977 977 while start < end:
978 978 yield start, min(windowsize, end-start)
979 979 start += windowsize
980 980 if windowsize < sizelimit:
981 981 windowsize *= 2
982 982 else:
983 983 while start > end:
984 984 yield start, min(windowsize, start-end-1)
985 985 start -= windowsize
986 986 if windowsize < sizelimit:
987 987 windowsize *= 2
988 988
989 989 m = match(repo, pats, opts)
990 990 follow = opts.get('follow') or opts.get('follow_first')
991 991
992 992 if not len(repo):
993 993 return [], m
994 994
995 995 if follow:
996 996 defrange = '%s:0' % repo['.'].rev()
997 997 else:
998 998 defrange = '-1:0'
999 999 revs = revrange(repo, opts['rev'] or [defrange])
1000 1000 wanted = {}
1001 1001 slowpath = m.anypats() or opts.get('removed')
1002 1002 fncache = {}
1003 1003
1004 1004 if not slowpath and not m.files():
1005 1005 # No files, no patterns. Display all revs.
1006 1006 wanted = dict.fromkeys(revs)
1007 1007 copies = []
1008 1008 if not slowpath:
1009 1009 # Only files, no patterns. Check the history of each file.
1010 1010 def filerevgen(filelog, node):
1011 1011 cl_count = len(repo)
1012 1012 if node is None:
1013 1013 last = len(filelog) - 1
1014 1014 else:
1015 1015 last = filelog.rev(node)
1016 1016 for i, window in increasing_windows(last, nullrev):
1017 1017 revs = []
1018 1018 for j in xrange(i - window, i + 1):
1019 1019 n = filelog.node(j)
1020 revs.append((filelog.linkrev(n),
1020 revs.append((filelog.linkrev(j),
1021 1021 follow and filelog.renamed(n)))
1022 1022 revs.reverse()
1023 1023 for rev in revs:
1024 1024 # only yield rev for which we have the changelog, it can
1025 1025 # happen while doing "hg log" during a pull or commit
1026 1026 if rev[0] < cl_count:
1027 1027 yield rev
1028 1028 def iterfiles():
1029 1029 for filename in m.files():
1030 1030 yield filename, None
1031 1031 for filename_node in copies:
1032 1032 yield filename_node
1033 1033 minrev, maxrev = min(revs), max(revs)
1034 1034 for file_, node in iterfiles():
1035 1035 filelog = repo.file(file_)
1036 1036 if not len(filelog):
1037 1037 if node is None:
1038 1038 # A zero count may be a directory or deleted file, so
1039 1039 # try to find matching entries on the slow path.
1040 1040 slowpath = True
1041 1041 break
1042 1042 else:
1043 1043 ui.warn(_('%s:%s copy source revision cannot be found!\n')
1044 1044 % (file_, short(node)))
1045 1045 continue
1046 1046 for rev, copied in filerevgen(filelog, node):
1047 1047 if rev <= maxrev:
1048 1048 if rev < minrev:
1049 1049 break
1050 1050 fncache.setdefault(rev, [])
1051 1051 fncache[rev].append(file_)
1052 1052 wanted[rev] = 1
1053 1053 if follow and copied:
1054 1054 copies.append(copied)
1055 1055 if slowpath:
1056 1056 if follow:
1057 1057 raise util.Abort(_('can only follow copies/renames for explicit '
1058 1058 'file names'))
1059 1059
1060 1060 # The slow path checks files modified in every changeset.
1061 1061 def changerevgen():
1062 1062 for i, window in increasing_windows(len(repo) - 1, nullrev):
1063 1063 for j in xrange(i - window, i + 1):
1064 1064 yield j, change(j)[3]
1065 1065
1066 1066 for rev, changefiles in changerevgen():
1067 1067 matches = filter(m, changefiles)
1068 1068 if matches:
1069 1069 fncache[rev] = matches
1070 1070 wanted[rev] = 1
1071 1071
1072 1072 class followfilter:
1073 1073 def __init__(self, onlyfirst=False):
1074 1074 self.startrev = nullrev
1075 1075 self.roots = []
1076 1076 self.onlyfirst = onlyfirst
1077 1077
1078 1078 def match(self, rev):
1079 1079 def realparents(rev):
1080 1080 if self.onlyfirst:
1081 1081 return repo.changelog.parentrevs(rev)[0:1]
1082 1082 else:
1083 1083 return filter(lambda x: x != nullrev,
1084 1084 repo.changelog.parentrevs(rev))
1085 1085
1086 1086 if self.startrev == nullrev:
1087 1087 self.startrev = rev
1088 1088 return True
1089 1089
1090 1090 if rev > self.startrev:
1091 1091 # forward: all descendants
1092 1092 if not self.roots:
1093 1093 self.roots.append(self.startrev)
1094 1094 for parent in realparents(rev):
1095 1095 if parent in self.roots:
1096 1096 self.roots.append(rev)
1097 1097 return True
1098 1098 else:
1099 1099 # backwards: all parents
1100 1100 if not self.roots:
1101 1101 self.roots.extend(realparents(self.startrev))
1102 1102 if rev in self.roots:
1103 1103 self.roots.remove(rev)
1104 1104 self.roots.extend(realparents(rev))
1105 1105 return True
1106 1106
1107 1107 return False
1108 1108
1109 1109 # it might be worthwhile to do this in the iterator if the rev range
1110 1110 # is descending and the prune args are all within that range
1111 1111 for rev in opts.get('prune', ()):
1112 1112 rev = repo.changelog.rev(repo.lookup(rev))
1113 1113 ff = followfilter()
1114 1114 stop = min(revs[0], revs[-1])
1115 1115 for x in xrange(rev, stop-1, -1):
1116 1116 if ff.match(x) and x in wanted:
1117 1117 del wanted[x]
1118 1118
1119 1119 def iterate():
1120 1120 if follow and not m.files():
1121 1121 ff = followfilter(onlyfirst=opts.get('follow_first'))
1122 1122 def want(rev):
1123 1123 if ff.match(rev) and rev in wanted:
1124 1124 return True
1125 1125 return False
1126 1126 else:
1127 1127 def want(rev):
1128 1128 return rev in wanted
1129 1129
1130 1130 for i, window in increasing_windows(0, len(revs)):
1131 1131 yield 'window', revs[0] < revs[-1], revs[-1]
1132 1132 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1133 1133 for rev in util.sort(list(nrevs)):
1134 1134 fns = fncache.get(rev)
1135 1135 if not fns:
1136 1136 def fns_generator():
1137 1137 for f in change(rev)[3]:
1138 1138 if m(f):
1139 1139 yield f
1140 1140 fns = fns_generator()
1141 1141 yield 'add', rev, fns
1142 1142 for rev in nrevs:
1143 1143 yield 'iter', rev, None
1144 1144 return iterate(), m
1145 1145
1146 1146 def commit(ui, repo, commitfunc, pats, opts):
1147 1147 '''commit the specified files or all outstanding changes'''
1148 1148 date = opts.get('date')
1149 1149 if date:
1150 1150 opts['date'] = util.parsedate(date)
1151 1151 message = logmessage(opts)
1152 1152
1153 1153 # extract addremove carefully -- this function can be called from a command
1154 1154 # that doesn't support addremove
1155 1155 if opts.get('addremove'):
1156 1156 addremove(repo, pats, opts)
1157 1157
1158 1158 m = match(repo, pats, opts)
1159 1159 if pats:
1160 1160 modified, added, removed = repo.status(match=m)[:3]
1161 1161 files = util.sort(modified + added + removed)
1162 1162
1163 1163 def is_dir(f):
1164 1164 name = f + '/'
1165 1165 i = bisect.bisect(files, name)
1166 1166 return i < len(files) and files[i].startswith(name)
1167 1167
1168 1168 for f in m.files():
1169 1169 if f == '.':
1170 1170 continue
1171 1171 if f not in files:
1172 1172 rf = repo.wjoin(f)
1173 1173 rel = repo.pathto(f)
1174 1174 try:
1175 1175 mode = os.lstat(rf)[stat.ST_MODE]
1176 1176 except OSError:
1177 1177 if is_dir(f): # deleted directory ?
1178 1178 continue
1179 1179 raise util.Abort(_("file %s not found!") % rel)
1180 1180 if stat.S_ISDIR(mode):
1181 1181 if not is_dir(f):
1182 1182 raise util.Abort(_("no match under directory %s!")
1183 1183 % rel)
1184 1184 elif not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
1185 1185 raise util.Abort(_("can't commit %s: "
1186 1186 "unsupported file type!") % rel)
1187 1187 elif f not in repo.dirstate:
1188 1188 raise util.Abort(_("file %s not tracked!") % rel)
1189 1189 m = matchfiles(repo, files)
1190 1190 try:
1191 1191 return commitfunc(ui, repo, message, m, opts)
1192 1192 except ValueError, inst:
1193 1193 raise util.Abort(str(inst))
@@ -1,3384 +1,3384 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from repo import RepoError, NoCapability
10 10 from i18n import _, gettext
11 11 import os, re, sys
12 12 import hg, util, revlog, bundlerepo, extensions, copies
13 13 import difflib, patch, time, help, mdiff, tempfile, url
14 14 import version
15 15 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
16 16 import merge as merge_
17 17
18 18 # Commands start here, listed alphabetically
19 19
20 20 def add(ui, repo, *pats, **opts):
21 21 """add the specified files on the next commit
22 22
23 23 Schedule files to be version controlled and added to the repository.
24 24
25 25 The files will be added to the repository at the next commit. To
26 26 undo an add before that, see hg revert.
27 27
28 28 If no names are given, add all files in the repository.
29 29 """
30 30
31 31 rejected = None
32 32 exacts = {}
33 33 names = []
34 34 m = cmdutil.match(repo, pats, opts)
35 35 m.bad = lambda x,y: True
36 36 for abs in repo.walk(m):
37 37 if m.exact(abs):
38 38 if ui.verbose:
39 39 ui.status(_('adding %s\n') % m.rel(abs))
40 40 names.append(abs)
41 41 exacts[abs] = 1
42 42 elif abs not in repo.dirstate:
43 43 ui.status(_('adding %s\n') % m.rel(abs))
44 44 names.append(abs)
45 45 if not opts.get('dry_run'):
46 46 rejected = repo.add(names)
47 47 rejected = [p for p in rejected if p in exacts]
48 48 return rejected and 1 or 0
49 49
50 50 def addremove(ui, repo, *pats, **opts):
51 51 """add all new files, delete all missing files
52 52
53 53 Add all new files and remove all missing files from the repository.
54 54
55 55 New files are ignored if they match any of the patterns in .hgignore. As
56 56 with add, these changes take effect at the next commit.
57 57
58 58 Use the -s option to detect renamed files. With a parameter > 0,
59 59 this compares every removed file with every added file and records
60 60 those similar enough as renames. This option takes a percentage
61 61 between 0 (disabled) and 100 (files must be identical) as its
62 62 parameter. Detecting renamed files this way can be expensive.
63 63 """
64 64 try:
65 65 sim = float(opts.get('similarity') or 0)
66 66 except ValueError:
67 67 raise util.Abort(_('similarity must be a number'))
68 68 if sim < 0 or sim > 100:
69 69 raise util.Abort(_('similarity must be between 0 and 100'))
70 70 return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
71 71
72 72 def annotate(ui, repo, *pats, **opts):
73 73 """show changeset information per file line
74 74
75 75 List changes in files, showing the revision id responsible for each line
76 76
77 77 This command is useful to discover who did a change or when a change took
78 78 place.
79 79
80 80 Without the -a option, annotate will avoid processing files it
81 81 detects as binary. With -a, annotate will generate an annotation
82 82 anyway, probably with undesirable results.
83 83 """
84 84 datefunc = ui.quiet and util.shortdate or util.datestr
85 85 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
86 86
87 87 if not pats:
88 88 raise util.Abort(_('at least one file name or pattern required'))
89 89
90 90 opmap = [('user', lambda x: ui.shortuser(x[0].user())),
91 91 ('number', lambda x: str(x[0].rev())),
92 92 ('changeset', lambda x: short(x[0].node())),
93 93 ('date', getdate),
94 94 ('follow', lambda x: x[0].path()),
95 95 ]
96 96
97 97 if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
98 98 and not opts.get('follow')):
99 99 opts['number'] = 1
100 100
101 101 linenumber = opts.get('line_number') is not None
102 102 if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
103 103 raise util.Abort(_('at least one of -n/-c is required for -l'))
104 104
105 105 funcmap = [func for op, func in opmap if opts.get(op)]
106 106 if linenumber:
107 107 lastfunc = funcmap[-1]
108 108 funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
109 109
110 110 ctx = repo[opts.get('rev')]
111 111
112 112 m = cmdutil.match(repo, pats, opts)
113 113 for abs in ctx.walk(m):
114 114 fctx = ctx[abs]
115 115 if not opts.get('text') and util.binary(fctx.data()):
116 116 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
117 117 continue
118 118
119 119 lines = fctx.annotate(follow=opts.get('follow'),
120 120 linenumber=linenumber)
121 121 pieces = []
122 122
123 123 for f in funcmap:
124 124 l = [f(n) for n, dummy in lines]
125 125 if l:
126 126 ml = max(map(len, l))
127 127 pieces.append(["%*s" % (ml, x) for x in l])
128 128
129 129 if pieces:
130 130 for p, l in zip(zip(*pieces), lines):
131 131 ui.write("%s: %s" % (" ".join(p), l[1]))
132 132
133 133 def archive(ui, repo, dest, **opts):
134 134 '''create unversioned archive of a repository revision
135 135
136 136 By default, the revision used is the parent of the working
137 137 directory; use "-r" to specify a different revision.
138 138
139 139 To specify the type of archive to create, use "-t". Valid
140 140 types are:
141 141
142 142 "files" (default): a directory full of files
143 143 "tar": tar archive, uncompressed
144 144 "tbz2": tar archive, compressed using bzip2
145 145 "tgz": tar archive, compressed using gzip
146 146 "uzip": zip archive, uncompressed
147 147 "zip": zip archive, compressed using deflate
148 148
149 149 The exact name of the destination archive or directory is given
150 150 using a format string; see "hg help export" for details.
151 151
152 152 Each member added to an archive file has a directory prefix
153 153 prepended. Use "-p" to specify a format string for the prefix.
154 154 The default is the basename of the archive, with suffixes removed.
155 155 '''
156 156
157 157 ctx = repo[opts.get('rev')]
158 158 if not ctx:
159 159 raise util.Abort(_('repository has no revisions'))
160 160 node = ctx.node()
161 161 dest = cmdutil.make_filename(repo, dest, node)
162 162 if os.path.realpath(dest) == repo.root:
163 163 raise util.Abort(_('repository root cannot be destination'))
164 164 matchfn = cmdutil.match(repo, [], opts)
165 165 kind = opts.get('type') or 'files'
166 166 prefix = opts.get('prefix')
167 167 if dest == '-':
168 168 if kind == 'files':
169 169 raise util.Abort(_('cannot archive plain files to stdout'))
170 170 dest = sys.stdout
171 171 if not prefix: prefix = os.path.basename(repo.root) + '-%h'
172 172 prefix = cmdutil.make_filename(repo, prefix, node)
173 173 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
174 174 matchfn, prefix)
175 175
176 176 def backout(ui, repo, node=None, rev=None, **opts):
177 177 '''reverse effect of earlier changeset
178 178
179 179 Commit the backed out changes as a new changeset. The new
180 180 changeset is a child of the backed out changeset.
181 181
182 182 If you back out a changeset other than the tip, a new head is
183 183 created. This head will be the new tip and you should merge this
184 184 backout changeset with another head (current one by default).
185 185
186 186 The --merge option remembers the parent of the working directory
187 187 before starting the backout, then merges the new head with that
188 188 changeset afterwards. This saves you from doing the merge by
189 189 hand. The result of this merge is not committed, as for a normal
190 190 merge.
191 191
192 192 See \'hg help dates\' for a list of formats valid for -d/--date.
193 193 '''
194 194 if rev and node:
195 195 raise util.Abort(_("please specify just one revision"))
196 196
197 197 if not rev:
198 198 rev = node
199 199
200 200 if not rev:
201 201 raise util.Abort(_("please specify a revision to backout"))
202 202
203 203 date = opts.get('date')
204 204 if date:
205 205 opts['date'] = util.parsedate(date)
206 206
207 207 cmdutil.bail_if_changed(repo)
208 208 node = repo.lookup(rev)
209 209
210 210 op1, op2 = repo.dirstate.parents()
211 211 a = repo.changelog.ancestor(op1, node)
212 212 if a != node:
213 213 raise util.Abort(_('cannot back out change on a different branch'))
214 214
215 215 p1, p2 = repo.changelog.parents(node)
216 216 if p1 == nullid:
217 217 raise util.Abort(_('cannot back out a change with no parents'))
218 218 if p2 != nullid:
219 219 if not opts.get('parent'):
220 220 raise util.Abort(_('cannot back out a merge changeset without '
221 221 '--parent'))
222 222 p = repo.lookup(opts['parent'])
223 223 if p not in (p1, p2):
224 224 raise util.Abort(_('%s is not a parent of %s') %
225 225 (short(p), short(node)))
226 226 parent = p
227 227 else:
228 228 if opts.get('parent'):
229 229 raise util.Abort(_('cannot use --parent on non-merge changeset'))
230 230 parent = p1
231 231
232 232 # the backout should appear on the same branch
233 233 branch = repo.dirstate.branch()
234 234 hg.clean(repo, node, show_stats=False)
235 235 repo.dirstate.setbranch(branch)
236 236 revert_opts = opts.copy()
237 237 revert_opts['date'] = None
238 238 revert_opts['all'] = True
239 239 revert_opts['rev'] = hex(parent)
240 240 revert_opts['no_backup'] = None
241 241 revert(ui, repo, **revert_opts)
242 242 commit_opts = opts.copy()
243 243 commit_opts['addremove'] = False
244 244 if not commit_opts['message'] and not commit_opts['logfile']:
245 245 commit_opts['message'] = _("Backed out changeset %s") % (short(node))
246 246 commit_opts['force_editor'] = True
247 247 commit(ui, repo, **commit_opts)
248 248 def nice(node):
249 249 return '%d:%s' % (repo.changelog.rev(node), short(node))
250 250 ui.status(_('changeset %s backs out changeset %s\n') %
251 251 (nice(repo.changelog.tip()), nice(node)))
252 252 if op1 != node:
253 253 hg.clean(repo, op1, show_stats=False)
254 254 if opts.get('merge'):
255 255 ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
256 256 hg.merge(repo, hex(repo.changelog.tip()))
257 257 else:
258 258 ui.status(_('the backout changeset is a new head - '
259 259 'do not forget to merge\n'))
260 260 ui.status(_('(use "backout --merge" '
261 261 'if you want to auto-merge)\n'))
262 262
263 263 def bisect(ui, repo, rev=None, extra=None, command=None,
264 264 reset=None, good=None, bad=None, skip=None, noupdate=None):
265 265 """subdivision search of changesets
266 266
267 267 This command helps to find changesets which introduce problems.
268 268 To use, mark the earliest changeset you know exhibits the problem
269 269 as bad, then mark the latest changeset which is free from the
270 270 problem as good. Bisect will update your working directory to a
271 271 revision for testing (unless the --noupdate option is specified).
272 272 Once you have performed tests, mark the working directory as bad
273 273 or good and bisect will either update to another candidate changeset
274 274 or announce that it has found the bad revision.
275 275
276 276 As a shortcut, you can also use the revision argument to mark a
277 277 revision as good or bad without checking it out first.
278 278
279 279 If you supply a command it will be used for automatic bisection. Its exit
280 280 status will be used as flag to mark revision as bad or good. In case exit
281 281 status is 0 the revision is marked as good, 125 - skipped, 127 (command not
282 282 found) - bisection will be aborted and any other status bigger than 0 will
283 283 mark revision as bad.
284 284 """
285 285 def print_result(nodes, good):
286 286 displayer = cmdutil.show_changeset(ui, repo, {})
287 287 transition = (good and "good" or "bad")
288 288 if len(nodes) == 1:
289 289 # narrowed it down to a single revision
290 290 ui.write(_("The first %s revision is:\n") % transition)
291 291 displayer.show(changenode=nodes[0])
292 292 else:
293 293 # multiple possible revisions
294 294 ui.write(_("Due to skipped revisions, the first "
295 295 "%s revision could be any of:\n") % transition)
296 296 for n in nodes:
297 297 displayer.show(changenode=n)
298 298
299 299 def check_state(state, interactive=True):
300 300 if not state['good'] or not state['bad']:
301 301 if (good or bad or skip or reset) and interactive:
302 302 return
303 303 if not state['good']:
304 304 raise util.Abort(_('cannot bisect (no known good revisions)'))
305 305 else:
306 306 raise util.Abort(_('cannot bisect (no known bad revisions)'))
307 307 return True
308 308
309 309 # backward compatibility
310 310 if rev in "good bad reset init".split():
311 311 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
312 312 cmd, rev, extra = rev, extra, None
313 313 if cmd == "good":
314 314 good = True
315 315 elif cmd == "bad":
316 316 bad = True
317 317 else:
318 318 reset = True
319 319 elif extra or good + bad + skip + reset + bool(command) > 1:
320 320 raise util.Abort(_('incompatible arguments'))
321 321
322 322 if reset:
323 323 p = repo.join("bisect.state")
324 324 if os.path.exists(p):
325 325 os.unlink(p)
326 326 return
327 327
328 328 state = hbisect.load_state(repo)
329 329
330 330 if command:
331 331 changesets = 1
332 332 while changesets:
333 333 # update state
334 334 status = os.spawnlp(os.P_WAIT, command)
335 335 node = repo.lookup(rev or '.')
336 336 if status == 125:
337 337 transition = "skip"
338 338 elif status == 0:
339 339 transition = "good"
340 340 # status < 0 means process was killed
341 341 elif status == 127 or status < 0:
342 342 break
343 343 else:
344 344 transition = "bad"
345 345 state[transition].append(node)
346 346 ui.note(_('Changeset %s: %s\n') % (short(node), transition))
347 347 check_state(state, interactive=False)
348 348 # bisect
349 349 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
350 350 # update to next check
351 351 cmdutil.bail_if_changed(repo)
352 352 hg.clean(repo, nodes[0], show_stats=False)
353 353 hbisect.save_state(repo, state)
354 354 return print_result(nodes, not status)
355 355
356 356 # update state
357 357 node = repo.lookup(rev or '.')
358 358 if good:
359 359 state['good'].append(node)
360 360 elif bad:
361 361 state['bad'].append(node)
362 362 elif skip:
363 363 state['skip'].append(node)
364 364
365 365 hbisect.save_state(repo, state)
366 366
367 367 if not check_state(state):
368 368 return
369 369
370 370 # actually bisect
371 371 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
372 372 if changesets == 0:
373 373 print_result(nodes, good)
374 374 else:
375 375 assert len(nodes) == 1 # only a single node can be tested next
376 376 node = nodes[0]
377 377 # compute the approximate number of remaining tests
378 378 tests, size = 0, 2
379 379 while size <= changesets:
380 380 tests, size = tests + 1, size * 2
381 381 rev = repo.changelog.rev(node)
382 382 ui.write(_("Testing changeset %s:%s "
383 383 "(%s changesets remaining, ~%s tests)\n")
384 384 % (rev, short(node), changesets, tests))
385 385 if not noupdate:
386 386 cmdutil.bail_if_changed(repo)
387 387 return hg.clean(repo, node)
388 388
389 389 def branch(ui, repo, label=None, **opts):
390 390 """set or show the current branch name
391 391
392 392 With no argument, show the current branch name. With one argument,
393 393 set the working directory branch name (the branch does not exist in
394 394 the repository until the next commit).
395 395
396 396 Unless --force is specified, branch will not let you set a
397 397 branch name that shadows an existing branch.
398 398
399 399 Use --clean to reset the working directory branch to that of the
400 400 parent of the working directory, negating a previous branch change.
401 401
402 402 Use the command 'hg update' to switch to an existing branch.
403 403 """
404 404
405 405 if opts.get('clean'):
406 406 label = repo[None].parents()[0].branch()
407 407 repo.dirstate.setbranch(label)
408 408 ui.status(_('reset working directory to branch %s\n') % label)
409 409 elif label:
410 410 if not opts.get('force') and label in repo.branchtags():
411 411 if label not in [p.branch() for p in repo.parents()]:
412 412 raise util.Abort(_('a branch of the same name already exists'
413 413 ' (use --force to override)'))
414 414 repo.dirstate.setbranch(util.fromlocal(label))
415 415 ui.status(_('marked working directory as branch %s\n') % label)
416 416 else:
417 417 ui.write("%s\n" % util.tolocal(repo.dirstate.branch()))
418 418
419 419 def branches(ui, repo, active=False):
420 420 """list repository named branches
421 421
422 422 List the repository's named branches, indicating which ones are
423 423 inactive. If active is specified, only show active branches.
424 424
425 425 A branch is considered active if it contains repository heads.
426 426
427 427 Use the command 'hg update' to switch to an existing branch.
428 428 """
429 429 hexfunc = ui.debugflag and hex or short
430 430 activebranches = [util.tolocal(repo[n].branch())
431 431 for n in repo.heads()]
432 432 branches = util.sort([(tag in activebranches, repo.changelog.rev(node), tag)
433 433 for tag, node in repo.branchtags().items()])
434 434 branches.reverse()
435 435
436 436 for isactive, node, tag in branches:
437 437 if (not active) or isactive:
438 438 if ui.quiet:
439 439 ui.write("%s\n" % tag)
440 440 else:
441 441 rev = str(node).rjust(31 - util.locallen(tag))
442 442 isinactive = ((not isactive) and " (inactive)") or ''
443 443 data = tag, rev, hexfunc(repo.lookup(node)), isinactive
444 444 ui.write("%s %s:%s%s\n" % data)
445 445
446 446 def bundle(ui, repo, fname, dest=None, **opts):
447 447 """create a changegroup file
448 448
449 449 Generate a compressed changegroup file collecting changesets not
450 450 found in the other repository.
451 451
452 452 If no destination repository is specified the destination is
453 453 assumed to have all the nodes specified by one or more --base
454 454 parameters. To create a bundle containing all changesets, use
455 455 --all (or --base null). To change the compression method applied,
456 456 use the -t option (by default, bundles are compressed using bz2).
457 457
458 458 The bundle file can then be transferred using conventional means and
459 459 applied to another repository with the unbundle or pull command.
460 460 This is useful when direct push and pull are not available or when
461 461 exporting an entire repository is undesirable.
462 462
463 463 Applying bundles preserves all changeset contents including
464 464 permissions, copy/rename information, and revision history.
465 465 """
466 466 revs = opts.get('rev') or None
467 467 if revs:
468 468 revs = [repo.lookup(rev) for rev in revs]
469 469 if opts.get('all'):
470 470 base = ['null']
471 471 else:
472 472 base = opts.get('base')
473 473 if base:
474 474 if dest:
475 475                 raise util.Abort(_("--base is incompatible with specifying "
476 476 "a destination"))
477 477 base = [repo.lookup(rev) for rev in base]
478 478 # create the right base
479 479 # XXX: nodesbetween / changegroup* should be "fixed" instead
480 480 o = []
481 481 has = {nullid: None}
482 482 for n in base:
483 483 has.update(repo.changelog.reachable(n))
484 484 if revs:
485 485 visit = list(revs)
486 486 else:
487 487 visit = repo.changelog.heads()
488 488 seen = {}
489 489 while visit:
490 490 n = visit.pop(0)
491 491 parents = [p for p in repo.changelog.parents(n) if p not in has]
492 492 if len(parents) == 0:
493 493 o.insert(0, n)
494 494 else:
495 495 for p in parents:
496 496 if p not in seen:
497 497 seen[p] = 1
498 498 visit.append(p)
499 499 else:
500 500 cmdutil.setremoteconfig(ui, opts)
501 501 dest, revs, checkout = hg.parseurl(
502 502 ui.expandpath(dest or 'default-push', dest or 'default'), revs)
503 503 other = hg.repository(ui, dest)
504 504 o = repo.findoutgoing(other, force=opts.get('force'))
505 505
506 506 if revs:
507 507 cg = repo.changegroupsubset(o, revs, 'bundle')
508 508 else:
509 509 cg = repo.changegroup(o, 'bundle')
510 510
511 511 bundletype = opts.get('type', 'bzip2').lower()
512 512 btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
513 513 bundletype = btypes.get(bundletype)
514 514 if bundletype not in changegroup.bundletypes:
515 515 raise util.Abort(_('unknown bundle type specified with --type'))
516 516
517 517 changegroup.writebundle(cg, fname, bundletype)
518 518
519 519 def cat(ui, repo, file1, *pats, **opts):
520 520 """output the current or given revision of files
521 521
522 522 Print the specified files as they were at the given revision.
523 523 If no revision is given, the parent of the working directory is used,
524 524 or tip if no revision is checked out.
525 525
526 526 Output may be to a file, in which case the name of the file is
527 527 given using a format string. The formatting rules are the same as
528 528 for the export command, with the following additions:
529 529
530 530 %s basename of file being printed
531 531 %d dirname of file being printed, or '.' if in repo root
532 532 %p root-relative path name of file being printed
533 533 """
534 534 ctx = repo[opts.get('rev')]
535 535 err = 1
536 536 m = cmdutil.match(repo, (file1,) + pats, opts)
537 537 for abs in ctx.walk(m):
538 538 fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(), pathname=abs)
539 539 data = ctx[abs].data()
540 540 if opts.get('decode'):
541 541 data = repo.wwritedata(abs, data)
542 542 fp.write(data)
543 543 err = 0
544 544 return err
545 545
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the checked out files). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.

    In some cases, you can clone repositories and checked out files
    using full hardlinks with

    $ cp -al REPO REPOCLONE

    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.

    If you use the -r option to clone up to a specific revision, no
    subsequent revisions will be present in the cloned repository.
    This option implies --pull, even on local repositories.

    If the -U option is used, the new clone will contain only a repository
    (.hg) and no working copy (the working copy parent is the null revision).

    See pull for valid source format details.

    It is possible to specify an ssh:// URL as the destination, but no
    .hg/hgrc and working directory will be created on the remote side.
    Look at the help text for the pull command for important details
    about ssh:// URLs.
    """
    # Propagate remote-access options (e.g. ssh settings) into ui config,
    # then delegate all actual cloning work to hg.clone().
    cmdutil.setremoteconfig(ui, opts)
    hg.clone(ui, source, dest,
             pull=opts.get('pull'),
             stream=opts.get('uncompressed'),
             rev=opts.get('rev'),
             update=not opts.get('noupdate'))
596 596
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    will be committed.

    If you are committing the result of a merge, do not provide any
    file names or -I/-X filters.

    If no commit message is specified, the configured editor is started to
    enter a message.

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # Callback handed to cmdutil.commit(): performs the actual repo commit
    # once cmdutil has resolved the message, matcher and options.
    def commitfunc(ui, repo, message, match, opts):
        return repo.commit(match.files(), message, opts.get('user'), opts.get('date'),
                           match, force_editor=opts.get('force_editor'))

    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        # Nothing was committed (e.g. no changes); report nothing.
        return
    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    if rev - 1 in parents:
        # one of the parents was the old tip
        pass
    elif (parents == (nullrev, nullrev) or
          len(cl.heads(cl.node(parents[0]))) > 1 and
          (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
        # Either a root commit, or every parent's branch already had
        # another head besides the new commit: a new head was created.
        ui.status(_('created new head\n'))

    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev,hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev,short(node)))
635 635
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit. To undo a copy
    before that, see hg revert.
    """
    # Take the working-dir lock (non-blocking acquire=False semantics per
    # repo.wlock(False)) and delegate to cmdutil.copy; the try/finally with
    # `del wlock` is the pre-context-manager idiom for releasing the lock.
    wlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        del wlock
655 655
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        # Explicit index file given: open it directly, no repo required.
        index, rev1, rev2 = args
        r = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        # No index given: fall back to the current repo's changelog.
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (r.rev(a), hex(a)))
673 673
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # --options mode: list the option flags (short and long forms)
        # instead of command names.
        options = []
        otables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            otables.append(entry[1])
        for t in otables:
            for o in t:
                if o[0]:
                    options.append('-%s' % o[0])
                options.append('--%s' % o[1])
        ui.write("%s\n" % "\n".join(options))
        return

    # Default mode: print all command names that could complete `cmd`.
    ui.write("%s\n" % "\n".join(util.sort(cmdutil.findpossible(cmd, table))))
692 692
def debugfsinfo(ui, path = "."):
    # Probe filesystem capabilities (exec bit, symlinks, case sensitivity)
    # by creating a throwaway file in the current directory.
    file('.debugfsinfo', 'w').write('')
    ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
    ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
    ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
                                       and 'yes' or 'no'))
    os.unlink('.debugfsinfo')
700 700
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    ctx = repo[rev]
    # Rebuilding mutates the dirstate, so hold the working-dir lock.
    wlock = repo.wlock()
    try:
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        del wlock
709 709
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # Pass 1: every tracked file's state letter must be consistent with
    # the parent manifests (n=normal, r=removed, a=added, m=merged).
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # Pass 2: every file in manifest1 must be known to the dirstate.
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
736 736
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no args, print names and values of all config items.

    With one arg of the form section.name, print just the value of
    that config item.

    With multiple args, print names and values of all config items
    with matching section names."""

    untrusted = bool(opts.get('untrusted'))
    if values:
        # At most one fully-qualified "section.name" item may be requested.
        if len([v for v in values if '.' in v]) > 1:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # Bare section name: print every item in that section.
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # Exact section.name match: print the value only.
                    ui.write(value, '\n')
        else:
            ui.write('%s=%s\n' % (sectname, value))
762 762
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """

    # A missing second parent defaults to the null revision.
    if not rev2:
        rev2 = hex(nullid)

    wlock = repo.wlock()
    try:
        repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
    finally:
        del wlock
778 778
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    for file_, ent in util.sort(repo.dirstate._map.items()):
        # ent is (state, mode, size, mtime); see dirstate internals.
        if showdate:
            if ent[3] == -1:
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
                timestr = 'unset'
                timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
        if ent[1] & 020000:
            # symlink bit set in the recorded mode (Python 2 octal literal)
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
799 799
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # file_ is a ".d" data file path; strip the 2-char suffix and open
    # the corresponding ".i" index instead.
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_[:-2] + ".i")
    try:
        ui.write(r.revision(r.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
807 807
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        # Accept the extended set of date formats as well.
        d = util.parsedate(date, util.extendeddateformats)
    else:
        d = util.parsedate(date)
    # d is a (unixtime, tz-offset) pair.
    ui.write("internal: %s %s\n" % d)
    ui.write("standard: %s\n" % util.datestr(d))
    if range:
        m = util.matchdate(range)
        ui.write("match: %s\n" % m(d[0]))
819 819
def debugindex(ui, file_):
    """dump the contents of an index file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("   rev    offset  length   base linkrev" +
             " nodeid       p1           p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        except:
            # Damaged index entries: show null parents rather than crash.
            pp = [nullid, nullid]
        # linkrev() takes a revision number (this changeset's API change).
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
                short(node), short(pp[0]), short(pp[1])))
834 834
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        # One edge per parent; the second parent is emitted only when
        # it is not the null node (i.e. this rev is a merge).
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")
846 846
def debuginstall(ui):
    '''test Mercurial installation'''

    # Write `contents` to a fresh temp file and return its path.
    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    problems = 0

    # encoding
    ui.status(_("Checking encoding (%s)...\n") % util._encoding)
    try:
        util.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # compiled modules
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        t = templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # patch: round-trip a tiny diff through the configured patch tool
    # and verify the patched file matches the expected contents.
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
                      os.path.basename(fa))
    fd = writetemp(d)

    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        a = file(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1

    if patchproblems:
        if ui.config('ui', 'patch'):
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://www.selenic.com/mercurial/bts\n"))
        problems += patchproblems

    os.unlink(fa)
    os.unlink(fd)

    # editor
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        problems += 1

    # check username: HGUSER, then ui.username config, then EMAIL.
    ui.status(_("Checking username...\n"))
    user = os.environ.get("HGUSER")
    if user is None:
        user = ui.config("ui", "username")
    if user is None:
        user = os.environ.get("EMAIL")
    if not user:
        ui.warn(" ")
        ui.username()
        ui.write(_(" (specify a username in your .hgrc file)\n"))

    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
958 958
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = repo[opts.get('rev')]
    m = cmdutil.match(repo, (file1,) + pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        # renamed() returns (source path, source node) or a falsy value
        # when this file revision is not a rename/copy.
        o = fctx.filelog().renamed(fctx.filenode())
        rel = m.rel(abs)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
972 972
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = cmdutil.match(repo, pats, opts)
    items = list(repo.walk(m))
    if not items:
        return
    # Column widths sized to the longest absolute and relative paths.
    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(m.rel(abs)) for abs in items]))
    for abs in items:
        line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
985 985
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.

    Use the --git option to generate diffs in the git extended diff
    format. Read the gitdiffs help topic for more information.
    """
    # Resolve the -r arguments into a (node1, node2) pair, then stream
    # the diff chunks straight to the ui.
    node1, node2 = cmdutil.revpair(repo, opts.get('rev'))

    m = cmdutil.match(repo, pats, opts)
    it = patch.diff(repo, node1, node2, match=m, opts=patch.diffopts(ui, opts))
    for chunk in it:
        repo.ui.write(chunk)
1016 1016
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent(s) and commit comment.

    NOTE: export may generate unexpected diff output for merge changesets,
    as it will compare the merge changeset against its first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.

    Use the --git option to generate diffs in the git extended diff
    format. Read the gitdiffs help topic for more information.

    With the --switch-parent option, the diff will be against the second
    parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = cmdutil.revrange(repo, changesets)
    # Pluralize the status message based on the number of revisions.
    if len(revs) > 1:
        ui.note(_('exporting patches:\n'))
    else:
        ui.note(_('exporting patch:\n'))
    patch.export(repo, revs, template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
1060 1060
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except Exception, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return None
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # Cache filelog objects per filename — each is opened at most once.
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    # Yield (linenum, colstart, colend, line-text) for each regexp match
    # in body, walking forward one match at a time.
    def matchlines(body):
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    # One matched line; hashed/compared on line text so that lists of
    # linestates can be diffed across revisions with SequenceMatcher.
    class linestate(object):
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

    matches = {}   # rev -> {filename -> [linestate, ...]}
    copies = {}    # rev -> {filename -> copy source}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    # Diff two linestate lists, yielding ('+'/'-', linestate) for lines
    # whose match status changed (used by --all).
    def difflinestates(a, b):
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    prev = {}  # filename -> last revision displayed for it
    def display(fn, rev, states, prevstates):
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        r = prev.get(fn, -1)
        if opts.get('all'):
            iter = difflinestates(states, prevstates)
        else:
            iter = [('', l) for l in prevstates]
        for change, l in iter:
            # Assemble the output columns per the requested options.
            cols = [fn, str(r)]
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(get(r)[1]))
            if opts.get('date'):
                cols.append(datefunc(get(r)[2]))
            if opts.get('files_with_matches'):
                # Only one output line per (file, rev) pair in -l mode.
                c = (fn, r)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            found = True
        return found

    fstate = {}
    skip = {}
    get = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
    found = False
    follow = opts.get('follow')
    # walkchangerevs emits a window/add/iter event stream; matches are
    # collected on 'add' and displayed (newest-first) on 'iter'.
    for st, rev, fns in changeiter:
        if st == 'window':
            matches.clear()
        elif st == 'add':
            ctx = repo[rev]
            matches[rev] = {}
            for fn in fns:
                if fn in skip:
                    continue
                try:
                    grepbody(fn, rev, getfile(fn).read(ctx.filenode(fn)))
                    fstate.setdefault(fn, [])
                    if follow:
                        copied = getfile(fn).renamed(ctx.filenode(fn))
                        if copied:
                            copies.setdefault(rev, {})[fn] = copied[0]
                except revlog.LookupError:
                    # File not present in this revision; nothing to grep.
                    pass
        elif st == 'iter':
            for fn, m in util.sort(matches[rev].items()):
                copy = copies.get(rev, {}).get(fn)
                if fn in skip:
                    if copy:
                        skip[copy] = True
                    continue
                if fn in prev or fstate[fn]:
                    r = display(fn, rev, m, fstate[fn])
                    found = found or r
                    if r and not opts.get('all'):
                        # Without --all, stop after the first matching rev.
                        skip[fn] = True
                        if copy:
                            skip[copy] = True
                fstate[fn] = m
                if copy:
                    fstate[copy] = m
                prev[fn] = rev

    # Flush any remaining per-file state that was never displayed.
    for fn, state in util.sort(fstate.items()):
        if fn in skip:
            continue
        if fn not in copies.get(prev[fn], {}):
            found = display(fn, rev, {}, state) or found
    # Shell convention: 0 when something matched, 1 otherwise.
    return (not found and 1) or 0
1226 1226
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository head changesets.

    If branch or revisions names are given this will show the heads of
    the specified branches or the branches those revisions are tagged
    with.

    Repository "heads" are changesets that don't have child
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.

    Branch heads are changesets that have a given branch tag, but have
    no child changesets with that tag. They are usually where
    development on the given branch takes place.
    """
    # -r limits heads to those reachable from (descended from) `start`.
    if opts.get('rev'):
        start = repo.lookup(opts['rev'])
    else:
        start = None
    if not branchrevs:
        # Assume we're looking repo-wide heads if no revs were specified.
        heads = repo.heads(start)
    else:
        heads = []
        visitedset = util.set()
        for branchrev in branchrevs:
            # Each argument names (or lies on) a branch; process each
            # branch only once even if named repeatedly.
            branch = repo[branchrev].branch()
            if branch in visitedset:
                continue
            visitedset.add(branch)
            bheads = repo.branchheads(branch, start)
            if not bheads:
                if branch != branchrev:
                    ui.warn(_("no changes on branch %s containing %s are "
                              "reachable from %s\n")
                            % (branch, branchrev, opts.get('rev')))
                else:
                    ui.warn(_("no changes on branch %s are reachable from %s\n")
                            % (branch, opts.get('rev')))
            heads.extend(bheads)
    if not heads:
        return 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in heads:
        displayer.show(changenode=n)
1274 1274
def help_(ui, name=None, with_version=False):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands and short help.

    Given a topic, extension, or command name, print help for that topic."""
    # (title, options) pairs accumulated by the helpers below and
    # rendered at the very end of the function.
    option_lists = []

    # Append the trailing "global options" / usage-hint section.
    def addglobalopts(aliases):
        if ui.verbose:
            option_lists.append((_("global options:"), globalopts))
            if name == 'shortlist':
                option_lists.append((_('use "hg help" for the full list '
                                       'of commands'), ()))
        else:
            if name == 'shortlist':
                msg = _('use "hg help" for the full list of commands '
                        'or "hg -v" for details')
            elif aliases:
                msg = _('use "hg -v help%s" to show aliases and '
                        'global options') % (name and " " + name or "")
            else:
                msg = _('use "hg -v help %s" to show global options') % name
            option_lists.append((msg, ()))

    # Help for a single command: synopsis, aliases, docstring, options.
    def helpcmd(name):
        if with_version:
            version_(ui)
            ui.write('\n')

        try:
            aliases, i = cmdutil.findcmd(name, table, False)
        except cmdutil.AmbiguousCommand, inst:
            # Ambiguous prefix: list every command it could expand to.
            select = lambda c: c.lstrip('^').startswith(inst.args[0])
            helplist(_('list of commands:\n\n'), select)
            return

        # synopsis
        ui.write("%s\n" % i[2])

        # aliases
        if not ui.quiet and len(aliases) > 1:
            ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

        # description
        doc = gettext(i[0].__doc__)
        if not doc:
            doc = _("(No help text available)")
        if ui.quiet:
            doc = doc.splitlines(0)[0]
        ui.write("\n%s\n" % doc.rstrip())

        if not ui.quiet:
            # options
            if i[1]:
                option_lists.append((_("options:\n"), i[1]))

            addglobalopts(False)

    # Print a (possibly filtered) listing of commands with one-line help.
    def helplist(header, select=None):
        h = {}
        cmds = {}
        for c, e in table.items():
            f = c.split("|", 1)[0]
            if select and not select(f):
                continue
            if (not select and name != 'shortlist' and
                e[0].__module__ != __name__):
                continue
            if name == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = gettext(e[0].__doc__)
            if not doc:
                doc = _("(No help text available)")
            h[f] = doc.splitlines(0)[0].rstrip()
            cmds[f] = c.lstrip("^")

        if not h:
            ui.status(_('no commands defined\n'))
            return

        ui.status(header)
        fns = util.sort(h)
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

        exts = list(extensions.extensions())
        if exts and name != 'shortlist':
            ui.write(_('\nenabled extensions:\n\n'))
            maxlength = 0
            exthelps = []
            for ename, ext in exts:
                doc = (ext.__doc__ or _('(no help text available)'))
                ename = ename.split('.')[-1]
                maxlength = max(len(ename), maxlength)
                exthelps.append((ename, doc.splitlines(0)[0].strip()))
            for ename, text in exthelps:
                ui.write(_(' %s %s\n') % (ename.ljust(maxlength), text))

        if not ui.quiet:
            addglobalopts(True)

    # Help for a named topic from help.helptable.
    def helptopic(name):
        for names, header, doc in help.helptable:
            if name in names:
                break
        else:
            raise cmdutil.UnknownCommand(name)

        # description
        if not doc:
            doc = _("(No help text available)")
        if callable(doc):
            doc = doc()

        ui.write("%s\n" % header)
        ui.write("%s\n" % doc.rstrip())

    # Help for an enabled extension: its docstring plus its commands.
    def helpext(name):
        try:
            mod = extensions.find(name)
        except KeyError:
            raise cmdutil.UnknownCommand(name)

        doc = gettext(mod.__doc__) or _('No help text available')
        doc = doc.splitlines(0)
        ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
        for d in doc[1:]:
            ui.write(d, '\n')

        ui.status('\n')

        try:
            ct = mod.cmdtable
        except AttributeError:
            ct = {}

        modcmds = dict.fromkeys([c.split('|', 1)[0] for c in ct])
        helplist(_('list of commands:\n\n'), modcmds.has_key)

    if name and name != 'shortlist':
        # Try command, then topic, then extension; remember the last
        # UnknownCommand so it can be re-raised if nothing matched.
        i = None
        for f in (helpcmd, helptopic, helpext):
            try:
                f(name)
                i = None
                break
            except cmdutil.UnknownCommand, inst:
                i = inst
        if i:
            raise i

    else:
        # program name
        if ui.verbose or with_version:
            version_(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if name == "shortlist":
            header = _('basic commands:\n\n')
        else:
            header = _('list of commands:\n\n')

        helplist(header)

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s" % title, None))
        for shortopt, longopt, default, desc in options:
            if "DEPRECATED" in desc and not ui.verbose: continue
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                              "%s%s" % (desc,
                                        default
                                        and _(" (default: %s)") % default
                                        or "")))

    if not name:
        ui.write(_("\nadditional help topics:\n\n"))
        topics = []
        for names, header, doc in help.helptable:
            # Sort each alias group longest-first so the primary
            # (longest) name is displayed for the topic.
            names = [(-len(name), name) for name in names]
            names.sort()
            topics.append((names[0][1], header))
        topics_len = max([len(s[0]) for s in topics])
        for t, desc in topics:
            ui.write(" %-*s %s\n" % (topics_len, t, desc))

    if opt_output:
        opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
1482 1482
1483 1483 def identify(ui, repo, source=None,
1484 1484 rev=None, num=None, id=None, branch=None, tags=None):
1485 1485 """identify the working copy or specified revision
1486 1486
1487 1487 With no revision, print a summary of the current state of the repo.
1488 1488
1489 1489 With a path, do a lookup in another repository.
1490 1490
1491 1491 This summary identifies the repository state using one or two parent
1492 1492 hash identifiers, followed by a "+" if there are uncommitted changes
1493 1493 in the working directory, a list of tags for this revision and a branch
1494 1494 name for non-default branches.
1495 1495 """
1496 1496
1497 1497 if not repo and not source:
1498 1498 raise util.Abort(_("There is no Mercurial repository here "
1499 1499 "(.hg not found)"))
1500 1500
1501 1501 hexfunc = ui.debugflag and hex or short
1502 1502 default = not (num or id or branch or tags)
1503 1503 output = []
1504 1504
1505 1505 if source:
1506 1506 source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
1507 1507 srepo = hg.repository(ui, source)
1508 1508 if not rev and revs:
1509 1509 rev = revs[0]
1510 1510 if not rev:
1511 1511 rev = "tip"
1512 1512 if num or branch or tags:
1513 1513 raise util.Abort(
1514 1514 "can't query remote revision number, branch, or tags")
1515 1515 output = [hexfunc(srepo.lookup(rev))]
1516 1516 elif not rev:
1517 1517 ctx = repo[None]
1518 1518 parents = ctx.parents()
1519 1519 changed = False
1520 1520 if default or id or num:
1521 1521 changed = ctx.files() + ctx.deleted()
1522 1522 if default or id:
1523 1523 output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
1524 1524 (changed) and "+" or "")]
1525 1525 if num:
1526 1526 output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
1527 1527 (changed) and "+" or ""))
1528 1528 else:
1529 1529 ctx = repo[rev]
1530 1530 if default or id:
1531 1531 output = [hexfunc(ctx.node())]
1532 1532 if num:
1533 1533 output.append(str(ctx.rev()))
1534 1534
1535 1535 if not source and default and not ui.quiet:
1536 1536 b = util.tolocal(ctx.branch())
1537 1537 if b != 'default':
1538 1538 output.append("(%s)" % b)
1539 1539
1540 1540 # multiple tags for a single parent separated by '/'
1541 1541 t = "/".join(ctx.tags())
1542 1542 if t:
1543 1543 output.append(t)
1544 1544
1545 1545 if branch:
1546 1546 output.append(util.tolocal(ctx.branch()))
1547 1547
1548 1548 if tags:
1549 1549 output.extend(ctx.tags())
1550 1550
1551 1551 ui.write("%s\n" % ' '.join(output))
1552 1552
1553 1553 def import_(ui, repo, patch1, *patches, **opts):
1554 1554 """import an ordered set of patches
1555 1555
1556 1556 Import a list of patches and commit them individually.
1557 1557
1558 1558 If there are outstanding changes in the working directory, import
1559 1559 will abort unless given the -f flag.
1560 1560
1561 1561 You can import a patch straight from a mail message. Even patches
1562 1562 as attachments work (body part must be type text/plain or
1563 1563 text/x-patch to be used). From and Subject headers of email
1564 1564 message are used as default committer and commit message. All
1565 1565 text/plain body parts before first diff are added to commit
1566 1566 message.
1567 1567
1568 1568 If the imported patch was generated by hg export, user and description
1569 1569 from patch override values from message headers and body. Values
1570 1570 given on command line with -m and -u override these.
1571 1571
1572 1572 If --exact is specified, import will set the working directory
1573 1573 to the parent of each patch before applying it, and will abort
1574 1574 if the resulting changeset has a different ID than the one
1575 1575 recorded in the patch. This may happen due to character set
1576 1576 problems or other deficiencies in the text patch format.
1577 1577
1578 1578 To read a patch from standard input, use patch name "-".
1579 1579 See 'hg help dates' for a list of formats valid for -d/--date.
1580 1580 """
1581 1581 patches = (patch1,) + patches
1582 1582
1583 1583 date = opts.get('date')
1584 1584 if date:
1585 1585 opts['date'] = util.parsedate(date)
1586 1586
1587 1587 if opts.get('exact') or not opts.get('force'):
1588 1588 cmdutil.bail_if_changed(repo)
1589 1589
1590 1590 d = opts["base"]
1591 1591 strip = opts["strip"]
1592 1592 wlock = lock = None
1593 1593 try:
1594 1594 wlock = repo.wlock()
1595 1595 lock = repo.lock()
1596 1596 for p in patches:
1597 1597 pf = os.path.join(d, p)
1598 1598
1599 1599 if pf == '-':
1600 1600 ui.status(_("applying patch from stdin\n"))
1601 1601 pf = sys.stdin
1602 1602 else:
1603 1603 ui.status(_("applying %s\n") % p)
1604 1604 pf = url.open(ui, pf)
1605 1605 data = patch.extract(ui, pf)
1606 1606 tmpname, message, user, date, branch, nodeid, p1, p2 = data
1607 1607
1608 1608 if tmpname is None:
1609 1609 raise util.Abort(_('no diffs found'))
1610 1610
1611 1611 try:
1612 1612 cmdline_message = cmdutil.logmessage(opts)
1613 1613 if cmdline_message:
1614 1614 # pickup the cmdline msg
1615 1615 message = cmdline_message
1616 1616 elif message:
1617 1617 # pickup the patch msg
1618 1618 message = message.strip()
1619 1619 else:
1620 1620 # launch the editor
1621 1621 message = None
1622 1622 ui.debug(_('message:\n%s\n') % message)
1623 1623
1624 1624 wp = repo.parents()
1625 1625 if opts.get('exact'):
1626 1626 if not nodeid or not p1:
1627 1627 raise util.Abort(_('not a mercurial patch'))
1628 1628 p1 = repo.lookup(p1)
1629 1629 p2 = repo.lookup(p2 or hex(nullid))
1630 1630
1631 1631 if p1 != wp[0].node():
1632 1632 hg.clean(repo, p1)
1633 1633 repo.dirstate.setparents(p1, p2)
1634 1634 elif p2:
1635 1635 try:
1636 1636 p1 = repo.lookup(p1)
1637 1637 p2 = repo.lookup(p2)
1638 1638 if p1 == wp[0].node():
1639 1639 repo.dirstate.setparents(p1, p2)
1640 1640 except RepoError:
1641 1641 pass
1642 1642 if opts.get('exact') or opts.get('import_branch'):
1643 1643 repo.dirstate.setbranch(branch or 'default')
1644 1644
1645 1645 files = {}
1646 1646 try:
1647 1647 fuzz = patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
1648 1648 files=files)
1649 1649 finally:
1650 1650 files = patch.updatedir(ui, repo, files)
1651 1651 if not opts.get('no_commit'):
1652 1652 n = repo.commit(files, message, opts.get('user') or user,
1653 1653 opts.get('date') or date)
1654 1654 if opts.get('exact'):
1655 1655 if hex(n) != nodeid:
1656 1656 repo.rollback()
1657 1657 raise util.Abort(_('patch is damaged'
1658 1658 ' or loses information'))
1659 1659 # Force a dirstate write so that the next transaction
1660 1660 # backups an up-do-date file.
1661 1661 repo.dirstate.write()
1662 1662 finally:
1663 1663 os.unlink(tmpname)
1664 1664 finally:
1665 1665 del lock, wlock
1666 1666
1667 1667 def incoming(ui, repo, source="default", **opts):
1668 1668 """show new changesets found in source
1669 1669
1670 1670 Show new changesets found in the specified path/URL or the default
1671 1671 pull location. These are the changesets that would be pulled if a pull
1672 1672 was requested.
1673 1673
1674 1674 For remote repository, using --bundle avoids downloading the changesets
1675 1675 twice if the incoming is followed by a pull.
1676 1676
1677 1677 See pull for valid source format details.
1678 1678 """
1679 1679 limit = cmdutil.loglimit(opts)
1680 1680 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
1681 1681 cmdutil.setremoteconfig(ui, opts)
1682 1682
1683 1683 other = hg.repository(ui, source)
1684 1684 ui.status(_('comparing with %s\n') % url.hidepassword(source))
1685 1685 if revs:
1686 1686 revs = [other.lookup(rev) for rev in revs]
1687 1687 incoming = repo.findincoming(other, heads=revs, force=opts["force"])
1688 1688 if not incoming:
1689 1689 try:
1690 1690 os.unlink(opts["bundle"])
1691 1691 except:
1692 1692 pass
1693 1693 ui.status(_("no changes found\n"))
1694 1694 return 1
1695 1695
1696 1696 cleanup = None
1697 1697 try:
1698 1698 fname = opts["bundle"]
1699 1699 if fname or not other.local():
1700 1700 # create a bundle (uncompressed if other repo is not local)
1701 1701 if revs is None:
1702 1702 cg = other.changegroup(incoming, "incoming")
1703 1703 else:
1704 1704 cg = other.changegroupsubset(incoming, revs, 'incoming')
1705 1705 bundletype = other.local() and "HG10BZ" or "HG10UN"
1706 1706 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
1707 1707 # keep written bundle?
1708 1708 if opts["bundle"]:
1709 1709 cleanup = None
1710 1710 if not other.local():
1711 1711 # use the created uncompressed bundlerepo
1712 1712 other = bundlerepo.bundlerepository(ui, repo.root, fname)
1713 1713
1714 1714 o = other.changelog.nodesbetween(incoming, revs)[0]
1715 1715 if opts.get('newest_first'):
1716 1716 o.reverse()
1717 1717 displayer = cmdutil.show_changeset(ui, other, opts)
1718 1718 count = 0
1719 1719 for n in o:
1720 1720 if count >= limit:
1721 1721 break
1722 1722 parents = [p for p in other.changelog.parents(n) if p != nullid]
1723 1723 if opts.get('no_merges') and len(parents) == 2:
1724 1724 continue
1725 1725 count += 1
1726 1726 displayer.show(changenode=n)
1727 1727 finally:
1728 1728 if hasattr(other, 'close'):
1729 1729 other.close()
1730 1730 if cleanup:
1731 1731 os.unlink(cleanup)
1732 1732
1733 1733 def init(ui, dest=".", **opts):
1734 1734 """create a new repository in the given directory
1735 1735
1736 1736 Initialize a new repository in the given directory. If the given
1737 1737 directory does not exist, it is created.
1738 1738
1739 1739 If no directory is given, the current directory is used.
1740 1740
1741 1741 It is possible to specify an ssh:// URL as the destination.
1742 1742 Look at the help text for the pull command for important details
1743 1743 about ssh:// URLs.
1744 1744 """
1745 1745 cmdutil.setremoteconfig(ui, opts)
1746 1746 hg.repository(ui, dest, create=1)
1747 1747
1748 1748 def locate(ui, repo, *pats, **opts):
1749 1749 """locate files matching specific patterns
1750 1750
1751 1751 Print all files under Mercurial control whose names match the
1752 1752 given patterns.
1753 1753
1754 1754 This command searches the entire repository by default. To search
1755 1755 just the current directory and its subdirectories, use
1756 1756 "--include .".
1757 1757
1758 1758 If no patterns are given to match, this command prints all file
1759 1759 names.
1760 1760
1761 1761 If you want to feed the output of this command into the "xargs"
1762 1762 command, use the "-0" option to both this command and "xargs".
1763 1763 This will avoid the problem of "xargs" treating single filenames
1764 1764 that contain white space as multiple filenames.
1765 1765 """
1766 1766 end = opts.get('print0') and '\0' or '\n'
1767 1767 rev = opts.get('rev') or None
1768 1768
1769 1769 ret = 1
1770 1770 m = cmdutil.match(repo, pats, opts, default='relglob')
1771 1771 m.bad = lambda x,y: False
1772 1772 for abs in repo[rev].walk(m):
1773 1773 if not rev and abs not in repo.dirstate:
1774 1774 continue
1775 1775 if opts.get('fullpath'):
1776 1776 ui.write(os.path.join(repo.root, abs), end)
1777 1777 else:
1778 1778 ui.write(((pats and m.rel(abs)) or abs), end)
1779 1779 ret = 0
1780 1780
1781 1781 return ret
1782 1782
1783 1783 def log(ui, repo, *pats, **opts):
1784 1784 """show revision history of entire repository or files
1785 1785
1786 1786 Print the revision history of the specified files or the entire
1787 1787 project.
1788 1788
1789 1789 File history is shown without following rename or copy history of
1790 1790 files. Use -f/--follow with a file name to follow history across
1791 1791 renames and copies. --follow without a file name will only show
1792 1792 ancestors or descendants of the starting revision. --follow-first
1793 1793 only follows the first parent of merge revisions.
1794 1794
1795 1795 If no revision range is specified, the default is tip:0 unless
1796 1796 --follow is set, in which case the working directory parent is
1797 1797 used as the starting revision.
1798 1798
1799 1799 See 'hg help dates' for a list of formats valid for -d/--date.
1800 1800
1801 1801 By default this command outputs: changeset id and hash, tags,
1802 1802 non-trivial parents, user, date and time, and a summary for each
1803 1803 commit. When the -v/--verbose switch is used, the list of changed
1804 1804 files and full commit message is shown.
1805 1805
1806 1806 NOTE: log -p may generate unexpected diff output for merge
1807 1807 changesets, as it will compare the merge changeset against its
1808 1808 first parent only. Also, the files: list will only reflect files
1809 1809 that are different from BOTH parents.
1810 1810
1811 1811 """
1812 1812
1813 1813 get = util.cachefunc(lambda r: repo[r].changeset())
1814 1814 changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
1815 1815
1816 1816 limit = cmdutil.loglimit(opts)
1817 1817 count = 0
1818 1818
1819 1819 if opts.get('copies') and opts.get('rev'):
1820 1820 endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
1821 1821 else:
1822 1822 endrev = len(repo)
1823 1823 rcache = {}
1824 1824 ncache = {}
1825 1825 def getrenamed(fn, rev):
1826 1826 '''looks up all renames for a file (up to endrev) the first
1827 1827 time the file is given. It indexes on the changerev and only
1828 1828 parses the manifest if linkrev != changerev.
1829 1829 Returns rename info for fn at changerev rev.'''
1830 1830 if fn not in rcache:
1831 1831 rcache[fn] = {}
1832 1832 ncache[fn] = {}
1833 1833 fl = repo.file(fn)
1834 1834 for i in fl:
1835 1835 node = fl.node(i)
1836 lr = fl.linkrev(node)
1836 lr = fl.linkrev(i)
1837 1837 renamed = fl.renamed(node)
1838 1838 rcache[fn][lr] = renamed
1839 1839 if renamed:
1840 1840 ncache[fn][node] = renamed
1841 1841 if lr >= endrev:
1842 1842 break
1843 1843 if rev in rcache[fn]:
1844 1844 return rcache[fn][rev]
1845 1845
1846 1846 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1847 1847 # filectx logic.
1848 1848
1849 1849 try:
1850 1850 return repo[rev][fn].renamed()
1851 1851 except revlog.LookupError:
1852 1852 pass
1853 1853 return None
1854 1854
1855 1855 df = False
1856 1856 if opts["date"]:
1857 1857 df = util.matchdate(opts["date"])
1858 1858
1859 1859 only_branches = opts.get('only_branch')
1860 1860
1861 1861 displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
1862 1862 for st, rev, fns in changeiter:
1863 1863 if st == 'add':
1864 1864 changenode = repo.changelog.node(rev)
1865 1865 parents = [p for p in repo.changelog.parentrevs(rev)
1866 1866 if p != nullrev]
1867 1867 if opts.get('no_merges') and len(parents) == 2:
1868 1868 continue
1869 1869 if opts.get('only_merges') and len(parents) != 2:
1870 1870 continue
1871 1871
1872 1872 if only_branches:
1873 1873 revbranch = get(rev)[5]['branch']
1874 1874 if revbranch not in only_branches:
1875 1875 continue
1876 1876
1877 1877 if df:
1878 1878 changes = get(rev)
1879 1879 if not df(changes[2][0]):
1880 1880 continue
1881 1881
1882 1882 if opts.get('keyword'):
1883 1883 changes = get(rev)
1884 1884 miss = 0
1885 1885 for k in [kw.lower() for kw in opts['keyword']]:
1886 1886 if not (k in changes[1].lower() or
1887 1887 k in changes[4].lower() or
1888 1888 k in " ".join(changes[3]).lower()):
1889 1889 miss = 1
1890 1890 break
1891 1891 if miss:
1892 1892 continue
1893 1893
1894 1894 if opts['user']:
1895 1895 changes = get(rev)
1896 1896 miss = 0
1897 1897 for k in opts['user']:
1898 1898 if k != changes[1]:
1899 1899 miss = 1
1900 1900 break
1901 1901 if miss:
1902 1902 continue
1903 1903
1904 1904 copies = []
1905 1905 if opts.get('copies') and rev:
1906 1906 for fn in get(rev)[3]:
1907 1907 rename = getrenamed(fn, rev)
1908 1908 if rename:
1909 1909 copies.append((fn, rename[0]))
1910 1910 displayer.show(rev, changenode, copies=copies)
1911 1911 elif st == 'iter':
1912 1912 if count == limit: break
1913 1913 if displayer.flush(rev):
1914 1914 count += 1
1915 1915
1916 1916 def manifest(ui, repo, node=None, rev=None):
1917 1917 """output the current or given revision of the project manifest
1918 1918
1919 1919 Print a list of version controlled files for the given revision.
1920 1920 If no revision is given, the parent of the working directory is used,
1921 1921 or tip if no revision is checked out.
1922 1922
1923 1923 The manifest is the list of files being version controlled. If no revision
1924 1924 is given then the first parent of the working directory is used.
1925 1925
1926 1926 With -v flag, print file permissions, symlink and executable bits. With
1927 1927 --debug flag, print file revision hashes.
1928 1928 """
1929 1929
1930 1930 if rev and node:
1931 1931 raise util.Abort(_("please specify just one revision"))
1932 1932
1933 1933 if not node:
1934 1934 node = rev
1935 1935
1936 1936 decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
1937 1937 ctx = repo[node]
1938 1938 for f in ctx:
1939 1939 if ui.debugflag:
1940 1940 ui.write("%40s " % hex(ctx.manifest()[f]))
1941 1941 if ui.verbose:
1942 1942 ui.write(decor[ctx.flags(f)])
1943 1943 ui.write("%s\n" % f)
1944 1944
1945 1945 def merge(ui, repo, node=None, force=None, rev=None):
1946 1946 """merge working directory with another revision
1947 1947
1948 1948 Merge the contents of the current working directory and the
1949 1949 requested revision. Files that changed between either parent are
1950 1950 marked as changed for the next commit and a commit must be
1951 1951 performed before any further updates are allowed.
1952 1952
1953 1953 If no revision is specified, the working directory's parent is a
1954 1954 head revision, and the current branch contains exactly one other head,
1955 1955 the other head is merged with by default. Otherwise, an explicit
1956 1956 revision to merge with must be provided.
1957 1957 """
1958 1958
1959 1959 if rev and node:
1960 1960 raise util.Abort(_("please specify just one revision"))
1961 1961 if not node:
1962 1962 node = rev
1963 1963
1964 1964 if not node:
1965 1965 branch = repo.changectx(None).branch()
1966 1966 bheads = repo.branchheads(branch)
1967 1967 if len(bheads) > 2:
1968 1968 raise util.Abort(_("branch '%s' has %d heads - "
1969 1969 "please merge with an explicit rev") %
1970 1970 (branch, len(bheads)))
1971 1971
1972 1972 parent = repo.dirstate.parents()[0]
1973 1973 if len(bheads) == 1:
1974 1974 if len(repo.heads()) > 1:
1975 1975 raise util.Abort(_("branch '%s' has one head - "
1976 1976 "please merge with an explicit rev") %
1977 1977 branch)
1978 1978 msg = _('there is nothing to merge')
1979 1979 if parent != repo.lookup(repo[None].branch()):
1980 1980 msg = _('%s - use "hg update" instead') % msg
1981 1981 raise util.Abort(msg)
1982 1982
1983 1983 if parent not in bheads:
1984 1984 raise util.Abort(_('working dir not at a head rev - '
1985 1985 'use "hg update" or merge with an explicit rev'))
1986 1986 node = parent == bheads[0] and bheads[-1] or bheads[0]
1987 1987 return hg.merge(repo, node, force=force)
1988 1988
1989 1989 def outgoing(ui, repo, dest=None, **opts):
1990 1990 """show changesets not found in destination
1991 1991
1992 1992 Show changesets not found in the specified destination repository or
1993 1993 the default push location. These are the changesets that would be pushed
1994 1994 if a push was requested.
1995 1995
1996 1996 See pull for valid destination format details.
1997 1997 """
1998 1998 limit = cmdutil.loglimit(opts)
1999 1999 dest, revs, checkout = hg.parseurl(
2000 2000 ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
2001 2001 cmdutil.setremoteconfig(ui, opts)
2002 2002 if revs:
2003 2003 revs = [repo.lookup(rev) for rev in revs]
2004 2004
2005 2005 other = hg.repository(ui, dest)
2006 2006 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
2007 2007 o = repo.findoutgoing(other, force=opts.get('force'))
2008 2008 if not o:
2009 2009 ui.status(_("no changes found\n"))
2010 2010 return 1
2011 2011 o = repo.changelog.nodesbetween(o, revs)[0]
2012 2012 if opts.get('newest_first'):
2013 2013 o.reverse()
2014 2014 displayer = cmdutil.show_changeset(ui, repo, opts)
2015 2015 count = 0
2016 2016 for n in o:
2017 2017 if count >= limit:
2018 2018 break
2019 2019 parents = [p for p in repo.changelog.parents(n) if p != nullid]
2020 2020 if opts.get('no_merges') and len(parents) == 2:
2021 2021 continue
2022 2022 count += 1
2023 2023 displayer.show(changenode=n)
2024 2024
2025 2025 def parents(ui, repo, file_=None, **opts):
2026 2026 """show the parents of the working dir or revision
2027 2027
2028 2028 Print the working directory's parent revisions. If a
2029 2029 revision is given via --rev, the parent of that revision
2030 2030 will be printed. If a file argument is given, revision in
2031 2031 which the file was last changed (before the working directory
2032 2032 revision or the argument to --rev if given) is printed.
2033 2033 """
2034 2034 rev = opts.get('rev')
2035 2035 if rev:
2036 2036 ctx = repo[rev]
2037 2037 else:
2038 2038 ctx = repo[None]
2039 2039
2040 2040 if file_:
2041 2041 m = cmdutil.match(repo, (file_,), opts)
2042 2042 if m.anypats() or len(m.files()) != 1:
2043 2043 raise util.Abort(_('can only specify an explicit file name'))
2044 2044 file_ = m.files()[0]
2045 2045 filenodes = []
2046 2046 for cp in ctx.parents():
2047 2047 if not cp:
2048 2048 continue
2049 2049 try:
2050 2050 filenodes.append(cp.filenode(file_))
2051 2051 except revlog.LookupError:
2052 2052 pass
2053 2053 if not filenodes:
2054 2054 raise util.Abort(_("'%s' not found in manifest!") % file_)
2055 2055 fl = repo.file(file_)
2056 p = [repo.lookup(fl.linkrev(fn)) for fn in filenodes]
2056 p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
2057 2057 else:
2058 2058 p = [cp.node() for cp in ctx.parents()]
2059 2059
2060 2060 displayer = cmdutil.show_changeset(ui, repo, opts)
2061 2061 for n in p:
2062 2062 if n != nullid:
2063 2063 displayer.show(changenode=n)
2064 2064
2065 2065 def paths(ui, repo, search=None):
2066 2066 """show definition of symbolic path names
2067 2067
2068 2068 Show definition of symbolic path name NAME. If no name is given, show
2069 2069 definition of available names.
2070 2070
2071 2071 Path names are defined in the [paths] section of /etc/mercurial/hgrc
2072 2072 and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
2073 2073 """
2074 2074 if search:
2075 2075 for name, path in ui.configitems("paths"):
2076 2076 if name == search:
2077 2077 ui.write("%s\n" % url.hidepassword(path))
2078 2078 return
2079 2079 ui.warn(_("not found!\n"))
2080 2080 return 1
2081 2081 else:
2082 2082 for name, path in ui.configitems("paths"):
2083 2083 ui.write("%s = %s\n" % (name, url.hidepassword(path)))
2084 2084
2085 2085 def postincoming(ui, repo, modheads, optupdate, checkout):
2086 2086 if modheads == 0:
2087 2087 return
2088 2088 if optupdate:
2089 2089 if modheads <= 1 or checkout:
2090 2090 return hg.update(repo, checkout)
2091 2091 else:
2092 2092 ui.status(_("not updating, since new heads added\n"))
2093 2093 if modheads > 1:
2094 2094 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
2095 2095 else:
2096 2096 ui.status(_("(run 'hg update' to get a working copy)\n"))
2097 2097
2098 2098 def pull(ui, repo, source="default", **opts):
2099 2099 """pull changes from the specified source
2100 2100
2101 2101 Pull changes from a remote repository to a local one.
2102 2102
2103 2103 This finds all changes from the repository at the specified path
2104 2104 or URL and adds them to the local repository. By default, this
2105 2105 does not update the copy of the project in the working directory.
2106 2106
2107 2107 Valid URLs are of the form:
2108 2108
2109 2109 local/filesystem/path (or file://local/filesystem/path)
2110 2110 http://[user[:pass]@]host[:port]/[path]
2111 2111 https://[user[:pass]@]host[:port]/[path]
2112 2112 ssh://[user[:pass]@]host[:port]/[path]
2113 2113
2114 2114 Paths in the local filesystem can either point to Mercurial
2115 2115 repositories or to bundle files (as created by 'hg bundle' or
2116 2116 'hg incoming --bundle').
2117 2117
2118 2118 An optional identifier after # indicates a particular branch, tag,
2119 2119 or changeset to pull.
2120 2120
2121 2121 Some notes about using SSH with Mercurial:
2122 2122 - SSH requires an accessible shell account on the destination machine
2123 2123 and a copy of hg in the remote path or specified with as remotecmd.
2124 2124 - path is relative to the remote user's home directory by default.
2125 2125 Use an extra slash at the start of a path to specify an absolute path:
2126 2126 ssh://example.com//tmp/repository
2127 2127 - Mercurial doesn't use its own compression via SSH; the right thing
2128 2128 to do is to configure it in your ~/.ssh/config, e.g.:
2129 2129 Host *.mylocalnetwork.example.com
2130 2130 Compression no
2131 2131 Host *
2132 2132 Compression yes
2133 2133 Alternatively specify "ssh -C" as your ssh command in your hgrc or
2134 2134 with the --ssh command line option.
2135 2135 """
2136 2136 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
2137 2137 cmdutil.setremoteconfig(ui, opts)
2138 2138
2139 2139 other = hg.repository(ui, source)
2140 2140 ui.status(_('pulling from %s\n') % url.hidepassword(source))
2141 2141 if revs:
2142 2142 try:
2143 2143 revs = [other.lookup(rev) for rev in revs]
2144 2144 except NoCapability:
2145 2145 error = _("Other repository doesn't support revision lookup, "
2146 2146 "so a rev cannot be specified.")
2147 2147 raise util.Abort(error)
2148 2148
2149 2149 modheads = repo.pull(other, heads=revs, force=opts.get('force'))
2150 2150 return postincoming(ui, repo, modheads, opts.get('update'), checkout)
2151 2151
2152 2152 def push(ui, repo, dest=None, **opts):
2153 2153 """push changes to the specified destination
2154 2154
2155 2155 Push changes from the local repository to the given destination.
2156 2156
2157 2157 This is the symmetrical operation for pull. It helps to move
2158 2158 changes from the current repository to a different one. If the
2159 2159 destination is local this is identical to a pull in that directory
2160 2160 from the current one.
2161 2161
2162 2162 By default, push will refuse to run if it detects the result would
2163 2163 increase the number of remote heads. This generally indicates the
2164 2164 the client has forgotten to pull and merge before pushing.
2165 2165
2166 2166 Valid URLs are of the form:
2167 2167
2168 2168 local/filesystem/path (or file://local/filesystem/path)
2169 2169 ssh://[user[:pass]@]host[:port]/[path]
2170 2170 http://[user[:pass]@]host[:port]/[path]
2171 2171 https://[user[:pass]@]host[:port]/[path]
2172 2172
2173 2173 An optional identifier after # indicates a particular branch, tag,
2174 2174 or changeset to push. If -r is used, the named changeset and all its
2175 2175 ancestors will be pushed to the remote repository.
2176 2176
2177 2177 Look at the help text for the pull command for important details
2178 2178 about ssh:// URLs.
2179 2179
2180 2180 Pushing to http:// and https:// URLs is only possible, if this
2181 2181 feature is explicitly enabled on the remote Mercurial server.
2182 2182 """
2183 2183 dest, revs, checkout = hg.parseurl(
2184 2184 ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
2185 2185 cmdutil.setremoteconfig(ui, opts)
2186 2186
2187 2187 other = hg.repository(ui, dest)
2188 2188 ui.status(_('pushing to %s\n') % url.hidepassword(dest))
2189 2189 if revs:
2190 2190 revs = [repo.lookup(rev) for rev in revs]
2191 2191 r = repo.push(other, opts.get('force'), revs=revs)
2192 2192 return r == 0
2193 2193
2194 2194 def rawcommit(ui, repo, *pats, **opts):
2195 2195 """raw commit interface (DEPRECATED)
2196 2196
2197 2197 (DEPRECATED)
2198 2198 Lowlevel commit, for use in helper scripts.
2199 2199
2200 2200 This command is not intended to be used by normal users, as it is
2201 2201 primarily useful for importing from other SCMs.
2202 2202
2203 2203 This command is now deprecated and will be removed in a future
2204 2204 release, please use debugsetparents and commit instead.
2205 2205 """
2206 2206
2207 2207 ui.warn(_("(the rawcommit command is deprecated)\n"))
2208 2208
2209 2209 message = cmdutil.logmessage(opts)
2210 2210
2211 2211 files = cmdutil.match(repo, pats, opts).files()
2212 2212 if opts.get('files'):
2213 2213 files += open(opts['files']).read().splitlines()
2214 2214
2215 2215 parents = [repo.lookup(p) for p in opts['parent']]
2216 2216
2217 2217 try:
2218 2218 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2219 2219 except ValueError, inst:
2220 2220 raise util.Abort(str(inst))
2221 2221
2222 2222 def recover(ui, repo):
2223 2223 """roll back an interrupted transaction
2224 2224
2225 2225 Recover from an interrupted commit or pull.
2226 2226
2227 2227 This command tries to fix the repository status after an interrupted
2228 2228 operation. It should only be necessary when Mercurial suggests it.
2229 2229 """
2230 2230 if repo.recover():
2231 2231 return hg.verify(repo)
2232 2232 return 1
2233 2233
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This only removes files from the current branch, not from the entire
    project history. -A can be used to remove only files that have already
    been deleted, -f can be used to force deletion, and -Af can be used
    to remove files from the next revision without deleting them.

    The following table details the behavior of remove for different file
    states (columns) and option combinations (rows). The file states are
    Added, Clean, Modified and Missing (as reported by hg status). The
    actions are Warn, Remove (from branch) and Delete (from disk).

           A  C  M  !
    none   W  RD W  R
    -f     R  RD RD R
    -A     W  W  W  R
    -Af    R  R  R  R

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """

    after = opts.get('after')
    force = opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))

    m = cmdutil.match(repo, pats, opts)
    st = repo.status(match=m, clean=True)
    # status tuple indices: 0=modified, 1=added, 3=deleted, 6=clean
    modified, added, deleted, clean = st[0], st[1], st[3], st[6]

    def warn(files, reason):
        # Explain why each file in `files` is being left alone.
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    # Pick which files get removed vs. merely forgotten, per the table
    # in the docstring above.
    if force:
        removelist, forgetlist = modified + deleted + clean, added
    elif after:
        removelist, forgetlist = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        removelist, forgetlist = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    for f in util.sort(removelist + forgetlist):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    repo.forget(forgetlist)
    repo.remove(removelist, unlink=not after)
2288 2288
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit. To undo a rename
    before that, see hg revert.
    """
    # NOTE(review): the False argument presumably controls lock waiting —
    # confirm against localrepo.wlock before relying on it.
    wlock = repo.wlock(False)
    try:
        # rename is implemented as copy + schedule-source-for-removal;
        # cmdutil.copy does all the work when rename=True.
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        # pre-context-manager lock-release idiom used throughout this file
        del wlock
2308 2308
def resolve(ui, repo, *pats, **opts):
    """resolve file merges from a branch merge or update

    This command will attempt to resolve unresolved merges from the
    last update or merge command. This will use the local file
    revision preserved at the last update or merge to cleanly retry
    the file merge attempt. With no file or options specified, this
    command will attempt to resolve all unresolved files.

    The codes used to show the status of files are:
    U = unresolved
    R = resolved
    """

    # --list, --mark and --unmark are mutually exclusive.
    enabled = [o for o in opts if opts[o]]
    if len(enabled) > 1:
        raise util.Abort(_("too many options specified"))

    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)

    for f in ms:
        if not m(f):
            continue
        if opts.get("list"):
            # show resolution state (U/R) for each merge-state file
            ui.write("%s %s\n" % (ms[f].upper(), f))
        elif opts.get("mark"):
            ms.mark(f, "r")
        elif opts.get("unmark"):
            ms.mark(f, "u")
        else:
            # re-run the file merge against the working context and the
            # other parent of the in-progress merge
            wctx = repo[None]
            mctx = wctx.parents()[-1]
            ms.resolve(f, wctx, mctx)
2341 2341
def revert(ui, repo, *pats, **opts):
    """restore individual files or dirs to an earlier state

    (use update -r to check out earlier revisions, revert does not
    change the working dir parents)

    With no revision specified, revert the named files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of the affected files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify the
    revision to revert to.

    Using the -r option, revert the given files or directories to their
    contents as of a specific revision. This can be helpful to "roll
    back" some or all of an earlier change.
    See 'hg help dates' for a list of formats valid for -d/--date.

    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the working directory. If you
    revert to a revision other than the parent of the working
    directory, the reverted files will thus appear modified
    afterwards.

    If a file has been deleted, it is restored. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.
    If no arguments are given, no files are reverted.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.
    """

    # --date is translated into a --rev; the two are mutually exclusive.
    if opts["date"]:
        if opts["rev"]:
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    if not pats and not opts.get('all'):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to revert the whole repo'))

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    ctx = repo[opts.get('rev')]
    node = ctx.node()
    mf = ctx.manifest()
    if node == parent:
        # reverting to the working dir's parent: target manifest doubles
        # as the parent manifest
        pmf = mf
    else:
        # parent manifest is loaded lazily below, only if actually needed
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    # names maps abs path -> (relative path for display, exact-match flag)
    names = {}

    wlock = repo.wlock()
    try:
        # walk dirstate.
        files = []

        m = cmdutil.match(repo, pats, opts)
        # suppress "file not found" complaints on the dirstate pass;
        # the manifest pass below reports them via badfn
        m.bad = lambda x,y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)

        # walk target manifest.

        def badfn(path, msg):
            # only warn about a missing path if neither the path itself
            # nor anything under it was seen in the dirstate walk
            if path in names:
                return False
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return False
            repo.ui.warn("%s: %s\n" % (m.rel(path), msg))
            return False

        m = cmdutil.match(repo, pats, opts)
        m.bad = badfn
        for abs in repo[node].walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)

        m = cmdutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        # dicts (used as sets) for O(1) membership tests in disptable below
        modified, added, removed, deleted = map(dict.fromkeys, changes)

        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed[src] = None
                names[src] = (repo.pathto(src, cwd), True)

        def removeforget(abs):
            # message depends on whether the file was only ever added
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')

        # each action is ([files accumulated], status message or callable)
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))

        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )

        for abs, (rel, exact) in util.sort(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # queue abs for the given action, saving a .orig backup
                # first when requested and the file exists on disk
                xlist[0].append(abs)
                if dobackup and not opts.get('no_backup') and util.lexists(target):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.copyfile(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table: continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # for/else: no dispatch-table state matched, i.e. the file
                # is unchanged in the dirstate (or not tracked at all)
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact: ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf:
                    if mfentry:
                        # if version of file is same in parent and target
                        # manifests, do nothing
                        if (pmf[abs] != mfentry or
                            pmf.flags(abs) != mf.flags(abs)):
                            handle(revert, False)
                    else:
                        handle(remove, False)

        if not opts.get('dry_run'):
            def checkout(f):
                # write the target revision's contents/flags into the
                # working directory
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())

            audit_path = util.path_auditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.forget(f)
                    continue
                audit_path(f)
                try:
                    util.unlink(repo.wjoin(f))
                except OSError:
                    # file already gone from disk; only dirstate matters
                    pass
                repo.dirstate.remove(f)

            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)

            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)

            # same normal/normallookup choice as above, for undeleted files
            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)

    finally:
        del wlock
2559 2559
def rollback(ui, repo):
    """roll back the last transaction

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

      commit
      import
      pull
      push (with this repository as destination)
      unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # all the work happens in localrepository.rollback()
    repo.rollback()
2587 2587
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    # Emit the repository root followed by a newline.
    ui.write("%s\n" % repo.root)
2594 2594
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the "-A" and "-E" options to log to files.
    """

    # --stdio: serve the ssh wire protocol on stdin/stdout instead of HTTP
    if opts["stdio"]:
        if repo is None:
            raise RepoError(_("There is no Mercurial repository here"
                              " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    # copy the relevant command-line options into the [web] config section
    # so the hgweb server picks them up
    parentui = ui.parentui or ui
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate")
    for o in optlist.split():
        if opts[o]:
            parentui.setconfig("web", o, str(opts[o]))
            if (repo is not None) and (repo.ui != parentui):
                repo.ui.setconfig("web", o, str(opts[o]))

    # without a repository we can still serve a multi-repo webdir_conf
    if repo is None and not ui.config("web", "webdir_conf"):
        raise RepoError(_("There is no Mercurial repository here"
                          " (.hg not found)"))

    # service object consumed by cmdutil.service (which handles
    # daemonizing); init() builds the server, run() blocks serving it
    class service:
        def init(self):
            util.set_signal_handler()
            self.httpd = hgweb.server.create_server(parentui, repo)

            if not ui.verbose: return

            # the rest of init() only formats the "listening at ..." banner
            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            port = ':%d' % self.httpd.port
            if port == ':80':
                # default HTTP port is omitted from the displayed URL
                port = ''

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    # shadow the class name with its single instance (existing idiom here)
    service = service()

    cmdutil.service(opts, initfn=service.init, runfn=service.run)
2658 2658
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    source of a copy/move operation, are not listed unless -c (clean),
    -i (ignored), -C (copies) or -A is given. Unless options described
    with "show only ..." are given, the options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/-ignored.

    NOTE: status may appear to disagree with diff if permissions have
    changed or a merge has occurred. The standard diff format does not
    report permission changes and diff only reports changes relative
    to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the difference between them is shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    C = clean
    ! = deleted, but still tracked
    ? = not tracked
    I = ignored
      = the previous added file was copied from here
    """

    node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
    cwd = (pats and repo.getcwd()) or ''
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    states = 'modified added removed deleted unknown ignored clean'.split()

    # which states to display: explicit flags, --all, or the -mard(u) default
    show = [st for st in states if opts[st]]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show)
    changestates = zip(states, 'MAR!?IC', stat)

    # collect copy information when -C/--copies (or --all) was requested
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?

        for a, b in copies.copies(repo, ctx1, ctx2, ctxn)[0].items():
            if a in added:
                copy[a] = b
            elif b in added:
                copy[b] = a

    for state, char, files in changestates:
        if state not in show:
            continue
        if opts.get('no_status'):
            fmt = "%%s%s" % end
        else:
            fmt = "%s %%s%s" % (char, end)

        for f in files:
            ui.write(fmt % repo.pathto(f, cwd))
            if f in copy:
                # show the copy source on a continuation line
                ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
2729 2729
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).

    See 'hg help dates' for a list of formats valid for -d/--date.
    """

    rev_ = "."
    names = (name1,) + names
    if len(names) != len(dict.fromkeys(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        if n in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % n)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # removing a tag: check the tag exists and is of the right kind,
        # then "tag" nullid to mark it dead
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_('tag \'%s\' does not exist') % n)
            if repo.tagtype(n) != expectedtype:
                raise util.Abort(_('tag \'%s\' is not a %s tag') %
                                 (n, expectedtype))
        rev_ = nullid
        if not message:
            message = _('Removed tag %s') % ', '.join(names)
    elif not opts.get('force'):
        for n in names:
            if n in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % n)
    # BUG FIX: rev_ is always a non-empty string at this point ("." by
    # default, a user-supplied revision, or the 20-byte nullid when
    # removing), so the previous test "if not rev_" could never be true
    # and the uncommitted-merge guard was dead code.  Test the
    # implicit-parent case explicitly instead.
    if rev_ == "." and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    r = repo[rev_].node()

    if not message:
        message = (_('Added tag %s for changeset %s') %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
2793 2793
def tags(ui, repo):
    """list repository tags

    List the repository tags.

    This lists both regular and local tags. When the -v/--verbose switch
    is used, a third column "local" is printed for local tags.
    """

    l = repo.tagslist()
    l.reverse()
    hexfunc = ui.debugflag and hex or short
    tagtype = ""

    for t, n in l:
        if ui.quiet:
            ui.write("%s\n" % t)
            continue

        try:
            hn = hexfunc(n)
            r = "%5d:%s" % (repo.changelog.rev(n), hn)
        except revlog.LookupError:
            # BUG FIX: the tag points at a node missing from the changelog.
            # Previously this assignment sat in a try/except whose else:
            # clause did the ui.write, so "r" was computed but the tag was
            # silently dropped from the listing.  Fall through and print
            # the tag with '?' in place of a revision number instead.
            r = " ?:%s" % hn
        spaces = " " * (30 - util.locallen(t))
        if ui.verbose:
            if repo.tagtype(t) == 'local':
                tagtype = " local"
            else:
                tagtype = ""
        ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
2826 2826
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the most
    recently added changeset in the repository, the most recently
    changed head.

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # the tip is always the highest-numbered revision: len(repo) - 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    displayer.show(len(repo) - 1)
2840 2840
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    fnames = (fname1,) + fnames

    # hold the repository lock while every bundle is applied; released
    # via the del-based idiom used throughout this file
    lock = None
    try:
        lock = repo.lock()
        for fname in fnames:
            gen = changegroup.readbundle(url.open(ui, fname), fname)
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
    finally:
        del lock

    # modheads from the last bundle drives the update/heads message
    return postincoming(ui, repo, modheads, opts.get('update'), None)
2860 2860
def update(ui, repo, node=None, rev=None, clean=False, date=None):
    """update working directory

    Update the repository's working directory to the specified revision,
    or the tip of the current branch if none is specified. Use null as
    the revision to remove the working copy (like 'hg clone -U').

    When the working dir contains no uncommitted changes, it will be
    replaced by the state of the requested revision from the repo. When
    the requested revision is on a different branch, the working dir
    will additionally be switched to that branch.

    When there are uncommitted changes, use option -C to discard them,
    forcibly replacing the state of the working dir with the requested
    revision.

    When there are uncommitted changes and option -C is not used, and
    the parent revision and requested revision are on the same branch,
    and one of them is an ancestor of the other, then the new working
    directory will contain the requested revision merged with the
    uncommitted changes. Otherwise, the update will fail with a
    suggestion to use 'merge' or 'update -C' instead.

    If you want to update just one file to an older revision, use revert.

    See 'hg help dates' for a list of formats valid for --date.
    """
    # the revision may arrive positionally (node) or via -r (rev);
    # accept either but not both
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not rev:
        rev = node

    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        # resolve --date to a concrete revision
        rev = cmdutil.finddate(ui, repo, date)

    if clean:
        return hg.clean(repo, rev)
    return hg.update(repo, rev)
2903 2903
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # all the work happens in hg.verify; its return value is the exit code
    return hg.verify(repo)
2915 2915
def version_(ui):
    """output version and copyright information"""
    # version line always shown; copyright notice goes through ui.status
    # so --quiet suppresses it
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % version.get_version())
    ui.status(_(
        "\nCopyright (C) 2005-2008 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
2926 2926
2927 2927 # Command options and aliases are listed here, alphabetically
2928 2928
# Each option is a fancyopts-style tuple:
#   (short name, long name, default value, help text)
# An empty short name means the option has no single-letter form; a [] default
# means the option may be given multiple times.

# options accepted by every command
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or symbolic path name')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [], _('set/override config option')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', util._encoding, _('set the charset encoding')),
    ('', 'encodingmode', util._encodingmode, _('set the charset encoding mode')),
    ('', 'lsprof', None, _('print improved command execution profile')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

# shared option groups, concatenated into per-command option lists in the
# command table below

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]

walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]

commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]

commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record user as committer')),
]

templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _("don't include dates in diff headers"))
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show'))
]
3000 3000
3001 3001 table = {
3002 3002 "^add": (add, walkopts + dryrunopts, _('hg add [OPTION]... [FILE]...')),
3003 3003 "addremove":
3004 3004 (addremove,
3005 3005 [('s', 'similarity', '',
3006 3006 _('guess renamed files by similarity (0<=s<=100)')),
3007 3007 ] + walkopts + dryrunopts,
3008 3008 _('hg addremove [OPTION]... [FILE]...')),
3009 3009 "^annotate|blame":
3010 3010 (annotate,
3011 3011 [('r', 'rev', '', _('annotate the specified revision')),
3012 3012 ('f', 'follow', None, _('follow file copies and renames')),
3013 3013 ('a', 'text', None, _('treat all files as text')),
3014 3014 ('u', 'user', None, _('list the author (long with -v)')),
3015 3015 ('d', 'date', None, _('list the date (short with -q)')),
3016 3016 ('n', 'number', None, _('list the revision number (default)')),
3017 3017 ('c', 'changeset', None, _('list the changeset')),
3018 3018 ('l', 'line-number', None,
3019 3019 _('show line number at the first appearance'))
3020 3020 ] + walkopts,
3021 3021 _('hg annotate [-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3022 3022 "archive":
3023 3023 (archive,
3024 3024 [('', 'no-decode', None, _('do not pass files through decoders')),
3025 3025 ('p', 'prefix', '', _('directory prefix for files in archive')),
3026 3026 ('r', 'rev', '', _('revision to distribute')),
3027 3027 ('t', 'type', '', _('type of distribution to create')),
3028 3028 ] + walkopts,
3029 3029 _('hg archive [OPTION]... DEST')),
3030 3030 "backout":
3031 3031 (backout,
3032 3032 [('', 'merge', None,
3033 3033 _('merge with old dirstate parent after backout')),
3034 3034 ('', 'parent', '', _('parent to choose when backing out merge')),
3035 3035 ('r', 'rev', '', _('revision to backout')),
3036 3036 ] + walkopts + commitopts + commitopts2,
3037 3037 _('hg backout [OPTION]... [-r] REV')),
3038 3038 "bisect":
3039 3039 (bisect,
3040 3040 [('r', 'reset', False, _('reset bisect state')),
3041 3041 ('g', 'good', False, _('mark changeset good')),
3042 3042 ('b', 'bad', False, _('mark changeset bad')),
3043 3043 ('s', 'skip', False, _('skip testing changeset')),
3044 3044 ('c', 'command', '', _('Use command to check changeset state')),
3045 3045 ('U', 'noupdate', False, _('do not update to target'))],
3046 3046 _("hg bisect [-gbsr] [-c CMD] [REV]")),
3047 3047 "branch":
3048 3048 (branch,
3049 3049 [('f', 'force', None,
3050 3050 _('set branch name even if it shadows an existing branch')),
3051 3051 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3052 3052 _('hg branch [-fC] [NAME]')),
3053 3053 "branches":
3054 3054 (branches,
3055 3055 [('a', 'active', False,
3056 3056 _('show only branches that have unmerged heads'))],
3057 3057 _('hg branches [-a]')),
3058 3058 "bundle":
3059 3059 (bundle,
3060 3060 [('f', 'force', None,
3061 3061 _('run even when remote repository is unrelated')),
3062 3062 ('r', 'rev', [],
3063 3063 _('a changeset up to which you would like to bundle')),
3064 3064 ('', 'base', [],
3065 3065 _('a base changeset to specify instead of a destination')),
3066 3066 ('a', 'all', None, _('bundle all changesets in the repository')),
3067 3067 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3068 3068 ] + remoteopts,
3069 3069 _('hg bundle [-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3070 3070 "cat":
3071 3071 (cat,
3072 3072 [('o', 'output', '', _('print output to file with formatted name')),
3073 3073 ('r', 'rev', '', _('print the given revision')),
3074 3074 ('', 'decode', None, _('apply any matching decode filter')),
3075 3075 ] + walkopts,
3076 3076 _('hg cat [OPTION]... FILE...')),
3077 3077 "^clone":
3078 3078 (clone,
3079 3079 [('U', 'noupdate', None,
3080 3080 _('the clone will only contain a repository (no working copy)')),
3081 3081 ('r', 'rev', [],
3082 3082 _('a changeset you would like to have after cloning')),
3083 3083 ('', 'pull', None, _('use pull protocol to copy metadata')),
3084 3084 ('', 'uncompressed', None,
3085 3085 _('use uncompressed transfer (fast over LAN)')),
3086 3086 ] + remoteopts,
3087 3087 _('hg clone [OPTION]... SOURCE [DEST]')),
3088 3088 "^commit|ci":
3089 3089 (commit,
3090 3090 [('A', 'addremove', None,
3091 3091 _('mark new/missing files as added/removed before committing')),
3092 3092 ] + walkopts + commitopts + commitopts2,
3093 3093 _('hg commit [OPTION]... [FILE]...')),
3094 3094 "copy|cp":
3095 3095 (copy,
3096 3096 [('A', 'after', None, _('record a copy that has already occurred')),
3097 3097 ('f', 'force', None,
3098 3098 _('forcibly copy over an existing managed file')),
3099 3099 ] + walkopts + dryrunopts,
3100 3100 _('hg copy [OPTION]... [SOURCE]... DEST')),
3101 3101 "debugancestor": (debugancestor, [],
3102 3102 _('hg debugancestor [INDEX] REV1 REV2')),
3103 3103 "debugcheckstate": (debugcheckstate, [], _('hg debugcheckstate')),
3104 3104 "debugcomplete":
3105 3105 (debugcomplete,
3106 3106 [('o', 'options', None, _('show the command options'))],
3107 3107 _('hg debugcomplete [-o] CMD')),
3108 3108 "debugdate":
3109 3109 (debugdate,
3110 3110 [('e', 'extended', None, _('try extended date formats'))],
3111 3111 _('hg debugdate [-e] DATE [RANGE]')),
3112 3112 "debugdata": (debugdata, [], _('hg debugdata FILE REV')),
3113 3113 "debugfsinfo": (debugfsinfo, [], _('hg debugfsinfo [PATH]')),
3114 3114 "debugindex": (debugindex, [], _('hg debugindex FILE')),
3115 3115 "debugindexdot": (debugindexdot, [], _('hg debugindexdot FILE')),
3116 3116 "debuginstall": (debuginstall, [], _('hg debuginstall')),
3117 3117 "debugrawcommit|rawcommit":
3118 3118 (rawcommit,
3119 3119 [('p', 'parent', [], _('parent')),
3120 3120 ('F', 'files', '', _('file list'))
3121 3121 ] + commitopts + commitopts2,
3122 3122 _('hg debugrawcommit [OPTION]... [FILE]...')),
3123 3123 "debugrebuildstate":
3124 3124 (debugrebuildstate,
3125 3125 [('r', 'rev', '', _('revision to rebuild to'))],
3126 3126 _('hg debugrebuildstate [-r REV] [REV]')),
3127 3127 "debugrename":
3128 3128 (debugrename,
3129 3129 [('r', 'rev', '', _('revision to debug'))],
3130 3130 _('hg debugrename [-r REV] FILE')),
3131 3131 "debugsetparents":
3132 3132 (debugsetparents,
3133 3133 [],
3134 3134 _('hg debugsetparents REV1 [REV2]')),
3135 3135 "debugstate":
3136 3136 (debugstate,
3137 3137 [('', 'nodates', None, _('do not display the saved mtime'))],
3138 3138 _('hg debugstate [OPTION]...')),
3139 3139 "debugwalk": (debugwalk, walkopts, _('hg debugwalk [OPTION]... [FILE]...')),
3140 3140 "^diff":
3141 3141 (diff,
3142 3142 [('r', 'rev', [], _('revision'))
3143 3143 ] + diffopts + diffopts2 + walkopts,
3144 3144 _('hg diff [OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3145 3145 "^export":
3146 3146 (export,
3147 3147 [('o', 'output', '', _('print output to file with formatted name')),
3148 3148 ('', 'switch-parent', None, _('diff against the second parent'))
3149 3149 ] + diffopts,
3150 3150 _('hg export [OPTION]... [-o OUTFILESPEC] REV...')),
3151 3151 "grep":
3152 3152 (grep,
3153 3153 [('0', 'print0', None, _('end fields with NUL')),
3154 3154 ('', 'all', None, _('print all revisions that match')),
3155 3155 ('f', 'follow', None,
3156 3156 _('follow changeset history, or file history across copies and renames')),
3157 3157 ('i', 'ignore-case', None, _('ignore case when matching')),
3158 3158 ('l', 'files-with-matches', None,
3159 3159 _('print only filenames and revs that match')),
3160 3160 ('n', 'line-number', None, _('print matching line numbers')),
3161 3161 ('r', 'rev', [], _('search in given revision range')),
3162 3162 ('u', 'user', None, _('list the author (long with -v)')),
3163 3163 ('d', 'date', None, _('list the date (short with -q)')),
3164 3164 ] + walkopts,
3165 3165 _('hg grep [OPTION]... PATTERN [FILE]...')),
3166 3166 "heads":
3167 3167 (heads,
3168 3168 [('r', 'rev', '', _('show only heads which are descendants of rev')),
3169 3169 ] + templateopts,
3170 3170 _('hg heads [-r REV] [REV]...')),
3171 3171 "help": (help_, [], _('hg help [TOPIC]')),
3172 3172 "identify|id":
3173 3173 (identify,
3174 3174 [('r', 'rev', '', _('identify the specified rev')),
3175 3175 ('n', 'num', None, _('show local revision number')),
3176 3176 ('i', 'id', None, _('show global revision id')),
3177 3177 ('b', 'branch', None, _('show branch')),
3178 3178 ('t', 'tags', None, _('show tags'))],
3179 3179 _('hg identify [-nibt] [-r REV] [SOURCE]')),
3180 3180 "import|patch":
3181 3181 (import_,
3182 3182 [('p', 'strip', 1,
3183 3183 _('directory strip option for patch. This has the same\n'
3184 3184 'meaning as the corresponding patch option')),
3185 3185 ('b', 'base', '', _('base path')),
3186 3186 ('f', 'force', None,
3187 3187 _('skip check for outstanding uncommitted changes')),
3188 3188 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3189 3189 ('', 'exact', None,
3190 3190 _('apply patch to the nodes from which it was generated')),
3191 3191 ('', 'import-branch', None,
3192 3192 _('Use any branch information in patch (implied by --exact)'))] +
3193 3193 commitopts + commitopts2,
3194 3194 _('hg import [OPTION]... PATCH...')),
3195 3195 "incoming|in":
3196 3196 (incoming,
3197 3197 [('f', 'force', None,
3198 3198 _('run even when remote repository is unrelated')),
3199 3199 ('n', 'newest-first', None, _('show newest record first')),
3200 3200 ('', 'bundle', '', _('file to store the bundles into')),
3201 3201 ('r', 'rev', [],
3202 3202 _('a specific revision up to which you would like to pull')),
3203 3203 ] + logopts + remoteopts,
3204 3204 _('hg incoming [-p] [-n] [-M] [-f] [-r REV]...'
3205 3205 ' [--bundle FILENAME] [SOURCE]')),
3206 3206 "^init":
3207 3207 (init,
3208 3208 remoteopts,
3209 3209 _('hg init [-e CMD] [--remotecmd CMD] [DEST]')),
3210 3210 "locate":
3211 3211 (locate,
3212 3212 [('r', 'rev', '', _('search the repository as it stood at rev')),
3213 3213 ('0', 'print0', None,
3214 3214 _('end filenames with NUL, for use with xargs')),
3215 3215 ('f', 'fullpath', None,
3216 3216 _('print complete paths from the filesystem root')),
3217 3217 ] + walkopts,
3218 3218 _('hg locate [OPTION]... [PATTERN]...')),
3219 3219 "^log|history":
3220 3220 (log,
3221 3221 [('f', 'follow', None,
3222 3222 _('follow changeset history, or file history across copies and renames')),
3223 3223 ('', 'follow-first', None,
3224 3224 _('only follow the first parent of merge changesets')),
3225 3225 ('d', 'date', '', _('show revs matching date spec')),
3226 3226 ('C', 'copies', None, _('show copied files')),
3227 3227 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3228 3228 ('r', 'rev', [], _('show the specified revision or range')),
3229 3229 ('', 'removed', None, _('include revs where files were removed')),
3230 3230 ('m', 'only-merges', None, _('show only merges')),
3231 3231 ('u', 'user', [], _('revs committed by user')),
3232 3232 ('b', 'only-branch', [],
3233 3233 _('show only changesets within the given named branch')),
3234 3234 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3235 3235 ] + logopts + walkopts,
3236 3236 _('hg log [OPTION]... [FILE]')),
3237 3237 "manifest":
3238 3238 (manifest,
3239 3239 [('r', 'rev', '', _('revision to display'))],
3240 3240 _('hg manifest [-r REV]')),
3241 3241 "^merge":
3242 3242 (merge,
3243 3243 [('f', 'force', None, _('force a merge with outstanding changes')),
3244 3244 ('r', 'rev', '', _('revision to merge')),
3245 3245 ],
3246 3246 _('hg merge [-f] [[-r] REV]')),
3247 3247 "outgoing|out":
3248 3248 (outgoing,
3249 3249 [('f', 'force', None,
3250 3250 _('run even when remote repository is unrelated')),
3251 3251 ('r', 'rev', [],
3252 3252 _('a specific revision up to which you would like to push')),
3253 3253 ('n', 'newest-first', None, _('show newest record first')),
3254 3254 ] + logopts + remoteopts,
3255 3255 _('hg outgoing [-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3256 3256 "^parents":
3257 3257 (parents,
3258 3258 [('r', 'rev', '', _('show parents from the specified rev')),
3259 3259 ] + templateopts,
3260 3260 _('hg parents [-r REV] [FILE]')),
3261 3261 "paths": (paths, [], _('hg paths [NAME]')),
3262 3262 "^pull":
3263 3263 (pull,
3264 3264 [('u', 'update', None,
3265 3265 _('update to new tip if changesets were pulled')),
3266 3266 ('f', 'force', None,
3267 3267 _('run even when remote repository is unrelated')),
3268 3268 ('r', 'rev', [],
3269 3269 _('a specific revision up to which you would like to pull')),
3270 3270 ] + remoteopts,
3271 3271 _('hg pull [-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3272 3272 "^push":
3273 3273 (push,
3274 3274 [('f', 'force', None, _('force push')),
3275 3275 ('r', 'rev', [],
3276 3276 _('a specific revision up to which you would like to push')),
3277 3277 ] + remoteopts,
3278 3278 _('hg push [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3279 3279 "recover": (recover, [], _('hg recover')),
3280 3280 "^remove|rm":
3281 3281 (remove,
3282 3282 [('A', 'after', None, _('record delete for missing files')),
3283 3283 ('f', 'force', None,
3284 3284 _('remove (and delete) file even if added or modified')),
3285 3285 ] + walkopts,
3286 3286 _('hg remove [OPTION]... FILE...')),
3287 3287 "rename|mv":
3288 3288 (rename,
3289 3289 [('A', 'after', None, _('record a rename that has already occurred')),
3290 3290 ('f', 'force', None,
3291 3291 _('forcibly copy over an existing managed file')),
3292 3292 ] + walkopts + dryrunopts,
3293 3293 _('hg rename [OPTION]... SOURCE... DEST')),
3294 3294 "resolve":
3295 3295 (resolve,
3296 3296 [('l', 'list', None, _('list state of files needing merge')),
3297 3297 ('m', 'mark', None, _('mark files as resolved')),
3298 3298 ('u', 'unmark', None, _('unmark files as resolved'))],
3299 3299 _('hg resolve [OPTION]... [FILE]...')),
3300 3300 "revert":
3301 3301 (revert,
3302 3302 [('a', 'all', None, _('revert all changes when no arguments given')),
3303 3303 ('d', 'date', '', _('tipmost revision matching date')),
3304 3304 ('r', 'rev', '', _('revision to revert to')),
3305 3305 ('', 'no-backup', None, _('do not save backup copies of files')),
3306 3306 ] + walkopts + dryrunopts,
3307 3307 _('hg revert [OPTION]... [-r REV] [NAME]...')),
3308 3308 "rollback": (rollback, [], _('hg rollback')),
3309 3309 "root": (root, [], _('hg root')),
3310 3310 "^serve":
3311 3311 (serve,
3312 3312 [('A', 'accesslog', '', _('name of access log file to write to')),
3313 3313 ('d', 'daemon', None, _('run server in background')),
3314 3314 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3315 3315 ('E', 'errorlog', '', _('name of error log file to write to')),
3316 3316 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3317 3317 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3318 3318 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3319 3319 ('n', 'name', '',
3320 3320 _('name to show in web pages (default: working dir)')),
3321 3321 ('', 'webdir-conf', '', _('name of the webdir config file'
3322 3322 ' (serve more than one repo)')),
3323 3323 ('', 'pid-file', '', _('name of file to write process ID to')),
3324 3324 ('', 'stdio', None, _('for remote clients')),
3325 3325 ('t', 'templates', '', _('web templates to use')),
3326 3326 ('', 'style', '', _('template style to use')),
3327 3327 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3328 3328 ('', 'certificate', '', _('SSL certificate file'))],
3329 3329 _('hg serve [OPTION]...')),
3330 3330 "showconfig|debugconfig":
3331 3331 (showconfig,
3332 3332 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3333 3333 _('hg showconfig [-u] [NAME]...')),
3334 3334 "^status|st":
3335 3335 (status,
3336 3336 [('A', 'all', None, _('show status of all files')),
3337 3337 ('m', 'modified', None, _('show only modified files')),
3338 3338 ('a', 'added', None, _('show only added files')),
3339 3339 ('r', 'removed', None, _('show only removed files')),
3340 3340 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3341 3341 ('c', 'clean', None, _('show only files without changes')),
3342 3342 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3343 3343 ('i', 'ignored', None, _('show only ignored files')),
3344 3344 ('n', 'no-status', None, _('hide status prefix')),
3345 3345 ('C', 'copies', None, _('show source of copied files')),
3346 3346 ('0', 'print0', None,
3347 3347 _('end filenames with NUL, for use with xargs')),
3348 3348 ('', 'rev', [], _('show difference from revision')),
3349 3349 ] + walkopts,
3350 3350 _('hg status [OPTION]... [FILE]...')),
3351 3351 "tag":
3352 3352 (tag,
3353 3353 [('f', 'force', None, _('replace existing tag')),
3354 3354 ('l', 'local', None, _('make the tag local')),
3355 3355 ('r', 'rev', '', _('revision to tag')),
3356 3356 ('', 'remove', None, _('remove a tag')),
3357 3357 # -l/--local is already there, commitopts cannot be used
3358 3358 ('m', 'message', '', _('use <text> as commit message')),
3359 3359 ] + commitopts2,
3360 3360 _('hg tag [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3361 3361 "tags": (tags, [], _('hg tags')),
3362 3362 "tip":
3363 3363 (tip,
3364 3364 [('p', 'patch', None, _('show patch')),
3365 3365 ] + templateopts,
3366 3366 _('hg tip [-p]')),
3367 3367 "unbundle":
3368 3368 (unbundle,
3369 3369 [('u', 'update', None,
3370 3370 _('update to new tip if changesets were unbundled'))],
3371 3371 _('hg unbundle [-u] FILE...')),
3372 3372 "^update|up|checkout|co":
3373 3373 (update,
3374 3374 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3375 3375 ('d', 'date', '', _('tipmost revision matching date')),
3376 3376 ('r', 'rev', '', _('revision'))],
3377 3377 _('hg update [-C] [-d DATE] [[-r] REV]')),
3378 3378 "verify": (verify, [], _('hg verify')),
3379 3379 "version": (version_, [], _('hg version')),
3380 3380 }
3381 3381
3382 3382 norepo = ("clone init version help debugcomplete debugdata"
3383 3383 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3384 3384 optionalrepo = ("identify paths serve showconfig debugancestor")
@@ -1,795 +1,795 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, revlog, util, os, errno
11 11
12 12 class changectx(object):
13 13 """A changecontext object makes access to data related to a particular
14 14 changeset convenient."""
15 15 def __init__(self, repo, changeid=''):
16 16 """changeid is a revision number, node, or tag"""
17 17 if changeid == '':
18 18 changeid = '.'
19 19 self._repo = repo
20 20 self._node = self._repo.lookup(changeid)
21 21 self._rev = self._repo.changelog.rev(self._node)
22 22
23 23 def __str__(self):
24 24 return short(self.node())
25 25
26 26 def __int__(self):
27 27 return self.rev()
28 28
29 29 def __repr__(self):
30 30 return "<changectx %s>" % str(self)
31 31
32 32 def __hash__(self):
33 33 try:
34 34 return hash(self._rev)
35 35 except AttributeError:
36 36 return id(self)
37 37
38 38 def __eq__(self, other):
39 39 try:
40 40 return self._rev == other._rev
41 41 except AttributeError:
42 42 return False
43 43
44 44 def __ne__(self, other):
45 45 return not (self == other)
46 46
47 47 def __nonzero__(self):
48 48 return self._rev != nullrev
49 49
50 50 def __getattr__(self, name):
51 51 if name == '_changeset':
52 52 self._changeset = self._repo.changelog.read(self.node())
53 53 return self._changeset
54 54 elif name == '_manifest':
55 55 self._manifest = self._repo.manifest.read(self._changeset[0])
56 56 return self._manifest
57 57 elif name == '_manifestdelta':
58 58 md = self._repo.manifest.readdelta(self._changeset[0])
59 59 self._manifestdelta = md
60 60 return self._manifestdelta
61 61 elif name == '_parents':
62 62 p = self._repo.changelog.parents(self._node)
63 63 if p[1] == nullid:
64 64 p = p[:-1]
65 65 self._parents = [changectx(self._repo, x) for x in p]
66 66 return self._parents
67 67 else:
68 68 raise AttributeError(name)
69 69
70 70 def __contains__(self, key):
71 71 return key in self._manifest
72 72
73 73 def __getitem__(self, key):
74 74 return self.filectx(key)
75 75
76 76 def __iter__(self):
77 77 for f in util.sort(self._manifest):
78 78 yield f
79 79
80 80 def changeset(self): return self._changeset
81 81 def manifest(self): return self._manifest
82 82
83 83 def rev(self): return self._rev
84 84 def node(self): return self._node
85 85 def hex(self): return hex(self._node)
86 86 def user(self): return self._changeset[1]
87 87 def date(self): return self._changeset[2]
88 88 def files(self): return self._changeset[3]
89 89 def description(self): return self._changeset[4]
90 90 def branch(self): return self._changeset[5].get("branch")
91 91 def extra(self): return self._changeset[5]
92 92 def tags(self): return self._repo.nodetags(self._node)
93 93
94 94 def parents(self):
95 95 """return contexts for each parent changeset"""
96 96 return self._parents
97 97
98 98 def children(self):
99 99 """return contexts for each child changeset"""
100 100 c = self._repo.changelog.children(self._node)
101 101 return [changectx(self._repo, x) for x in c]
102 102
103 103 def ancestors(self):
104 104 for a in self._repo.changelog.ancestors(self._rev):
105 105 yield changectx(self._repo, a)
106 106
107 107 def descendants(self):
108 108 for d in self._repo.changelog.descendants(self._rev):
109 109 yield changectx(self._repo, d)
110 110
111 111 def _fileinfo(self, path):
112 112 if '_manifest' in self.__dict__:
113 113 try:
114 114 return self._manifest[path], self._manifest.flags(path)
115 115 except KeyError:
116 116 raise revlog.LookupError(self._node, path,
117 117 _('not found in manifest'))
118 118 if '_manifestdelta' in self.__dict__ or path in self.files():
119 119 if path in self._manifestdelta:
120 120 return self._manifestdelta[path], self._manifestdelta.flags(path)
121 121 node, flag = self._repo.manifest.find(self._changeset[0], path)
122 122 if not node:
123 123 raise revlog.LookupError(self._node, path,
124 124 _('not found in manifest'))
125 125
126 126 return node, flag
127 127
128 128 def filenode(self, path):
129 129 return self._fileinfo(path)[0]
130 130
131 131 def flags(self, path):
132 132 try:
133 133 return self._fileinfo(path)[1]
134 134 except revlog.LookupError:
135 135 return ''
136 136
137 137 def filectx(self, path, fileid=None, filelog=None):
138 138 """get a file context from this changeset"""
139 139 if fileid is None:
140 140 fileid = self.filenode(path)
141 141 return filectx(self._repo, path, fileid=fileid,
142 142 changectx=self, filelog=filelog)
143 143
144 144 def ancestor(self, c2):
145 145 """
146 146 return the ancestor context of self and c2
147 147 """
148 148 n = self._repo.changelog.ancestor(self._node, c2._node)
149 149 return changectx(self._repo, n)
150 150
151 151 def walk(self, match):
152 152 fdict = dict.fromkeys(match.files())
153 153 # for dirstate.walk, files=['.'] means "walk the whole tree".
154 154 # follow that here, too
155 155 fdict.pop('.', None)
156 156 for fn in self:
157 157 for ffn in fdict:
158 158 # match if the file is the exact name or a directory
159 159 if ffn == fn or fn.startswith("%s/" % ffn):
160 160 del fdict[ffn]
161 161 break
162 162 if match(fn):
163 163 yield fn
164 164 for fn in util.sort(fdict):
165 165 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
166 166 yield fn
167 167
168 168 class filectx(object):
169 169 """A filecontext object makes access to data related to a particular
170 170 filerevision convenient."""
171 171 def __init__(self, repo, path, changeid=None, fileid=None,
172 172 filelog=None, changectx=None):
173 173 """changeid can be a changeset revision, node, or tag.
174 174 fileid can be a file revision or node."""
175 175 self._repo = repo
176 176 self._path = path
177 177
178 178 assert (changeid is not None
179 179 or fileid is not None
180 180 or changectx is not None)
181 181
182 182 if filelog:
183 183 self._filelog = filelog
184 184
185 185 if changeid is not None:
186 186 self._changeid = changeid
187 187 if changectx is not None:
188 188 self._changectx = changectx
189 189 if fileid is not None:
190 190 self._fileid = fileid
191 191
192 192 def __getattr__(self, name):
193 193 if name == '_changectx':
194 194 self._changectx = changectx(self._repo, self._changeid)
195 195 return self._changectx
196 196 elif name == '_filelog':
197 197 self._filelog = self._repo.file(self._path)
198 198 return self._filelog
199 199 elif name == '_changeid':
200 200 if '_changectx' in self.__dict__:
201 201 self._changeid = self._changectx.rev()
202 202 else:
203 self._changeid = self._filelog.linkrev(self._filenode)
203 self._changeid = self._filelog.linkrev(self._filerev)
204 204 return self._changeid
205 205 elif name == '_filenode':
206 206 if '_fileid' in self.__dict__:
207 207 self._filenode = self._filelog.lookup(self._fileid)
208 208 else:
209 209 self._filenode = self._changectx.filenode(self._path)
210 210 return self._filenode
211 211 elif name == '_filerev':
212 212 self._filerev = self._filelog.rev(self._filenode)
213 213 return self._filerev
214 214 elif name == '_repopath':
215 215 self._repopath = self._path
216 216 return self._repopath
217 217 else:
218 218 raise AttributeError(name)
219 219
220 220 def __nonzero__(self):
221 221 try:
222 222 n = self._filenode
223 223 return True
224 224 except revlog.LookupError:
225 225 # file is missing
226 226 return False
227 227
228 228 def __str__(self):
229 229 return "%s@%s" % (self.path(), short(self.node()))
230 230
231 231 def __repr__(self):
232 232 return "<filectx %s>" % str(self)
233 233
234 234 def __hash__(self):
235 235 try:
236 236 return hash((self._path, self._fileid))
237 237 except AttributeError:
238 238 return id(self)
239 239
240 240 def __eq__(self, other):
241 241 try:
242 242 return (self._path == other._path
243 243 and self._fileid == other._fileid)
244 244 except AttributeError:
245 245 return False
246 246
247 247 def __ne__(self, other):
248 248 return not (self == other)
249 249
250 250 def filectx(self, fileid):
251 251 '''opens an arbitrary revision of the file without
252 252 opening a new filelog'''
253 253 return filectx(self._repo, self._path, fileid=fileid,
254 254 filelog=self._filelog)
255 255
256 256 def filerev(self): return self._filerev
257 257 def filenode(self): return self._filenode
258 258 def flags(self): return self._changectx.flags(self._path)
259 259 def filelog(self): return self._filelog
260 260
261 261 def rev(self):
262 262 if '_changectx' in self.__dict__:
263 263 return self._changectx.rev()
264 264 if '_changeid' in self.__dict__:
265 265 return self._changectx.rev()
266 return self._filelog.linkrev(self._filenode)
266 return self._filelog.linkrev(self._filerev)
267 267
268 def linkrev(self): return self._filelog.linkrev(self._filenode)
268 def linkrev(self): return self._filelog.linkrev(self._filerev)
269 269 def node(self): return self._changectx.node()
270 270 def user(self): return self._changectx.user()
271 271 def date(self): return self._changectx.date()
272 272 def files(self): return self._changectx.files()
273 273 def description(self): return self._changectx.description()
274 274 def branch(self): return self._changectx.branch()
275 275 def manifest(self): return self._changectx.manifest()
276 276 def changectx(self): return self._changectx
277 277
278 278 def data(self): return self._filelog.read(self._filenode)
279 279 def path(self): return self._path
280 280 def size(self): return self._filelog.size(self._filerev)
281 281
282 282 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
283 283
284 284 def renamed(self):
285 285 """check if file was actually renamed in this changeset revision
286 286
287 287 If rename logged in file revision, we report copy for changeset only
288 288 if file revisions linkrev points back to the changeset in question
289 289 or both changeset parents contain different file revisions.
290 290 """
291 291
292 292 renamed = self._filelog.renamed(self._filenode)
293 293 if not renamed:
294 294 return renamed
295 295
296 296 if self.rev() == self.linkrev():
297 297 return renamed
298 298
299 299 name = self.path()
300 300 fnode = self._filenode
301 301 for p in self._changectx.parents():
302 302 try:
303 303 if fnode == p.filenode(name):
304 304 return None
305 305 except revlog.LookupError:
306 306 pass
307 307 return renamed
308 308
309 309 def parents(self):
310 310 p = self._path
311 311 fl = self._filelog
312 312 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
313 313
314 314 r = self._filelog.renamed(self._filenode)
315 315 if r:
316 316 pl[0] = (r[0], r[1], None)
317 317
318 318 return [filectx(self._repo, p, fileid=n, filelog=l)
319 319 for p,n,l in pl if n != nullid]
320 320
321 321 def children(self):
322 322 # hard for renames
323 323 c = self._filelog.children(self._filenode)
324 324 return [filectx(self._repo, self._path, fileid=x,
325 325 filelog=self._filelog) for x in c]
326 326
327 327 def annotate(self, follow=False, linenumber=None):
328 328 '''returns a list of tuples of (ctx, line) for each line
329 329 in the file, where ctx is the filectx of the node where
330 330 that line was last changed.
331 331 This returns tuples of ((ctx, linenumber), line) for each line,
332 332 if "linenumber" parameter is NOT "None".
333 333 In such tuples, linenumber means one at the first appearance
334 334 in the managed file.
335 335 To reduce annotation cost,
336 336 this returns fixed value(False is used) as linenumber,
337 337 if "linenumber" parameter is "False".'''
338 338
339 339 def decorate_compat(text, rev):
340 340 return ([rev] * len(text.splitlines()), text)
341 341
342 342 def without_linenumber(text, rev):
343 343 return ([(rev, False)] * len(text.splitlines()), text)
344 344
345 345 def with_linenumber(text, rev):
346 346 size = len(text.splitlines())
347 347 return ([(rev, i) for i in xrange(1, size + 1)], text)
348 348
349 349 decorate = (((linenumber is None) and decorate_compat) or
350 350 (linenumber and with_linenumber) or
351 351 without_linenumber)
352 352
353 353 def pair(parent, child):
354 354 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
355 355 child[0][b1:b2] = parent[0][a1:a2]
356 356 return child
357 357
358 358 getlog = util.cachefunc(lambda x: self._repo.file(x))
359 359 def getctx(path, fileid):
360 360 log = path == self._path and self._filelog or getlog(path)
361 361 return filectx(self._repo, path, fileid=fileid, filelog=log)
362 362 getctx = util.cachefunc(getctx)
363 363
364 364 def parents(f):
365 365 # we want to reuse filectx objects as much as possible
366 366 p = f._path
367 367 if f._filerev is None: # working dir
368 368 pl = [(n.path(), n.filerev()) for n in f.parents()]
369 369 else:
370 370 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
371 371
372 372 if follow:
373 373 r = f.renamed()
374 374 if r:
375 375 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
376 376
377 377 return [getctx(p, n) for p, n in pl if n != nullrev]
378 378
379 379 # use linkrev to find the first changeset where self appeared
380 380 if self.rev() != self.linkrev():
381 381 base = self.filectx(self.filerev())
382 382 else:
383 383 base = self
384 384
385 385 # find all ancestors
386 386 needed = {base: 1}
387 387 visit = [base]
388 388 files = [base._path]
389 389 while visit:
390 390 f = visit.pop(0)
391 391 for p in parents(f):
392 392 if p not in needed:
393 393 needed[p] = 1
394 394 visit.append(p)
395 395 if p._path not in files:
396 396 files.append(p._path)
397 397 else:
398 398 # count how many times we'll use this
399 399 needed[p] += 1
400 400
401 401 # sort by revision (per file) which is a topological order
402 402 visit = []
403 403 for f in files:
404 404 fn = [(n.rev(), n) for n in needed if n._path == f]
405 405 visit.extend(fn)
406 406
407 407 hist = {}
408 408 for r, f in util.sort(visit):
409 409 curr = decorate(f.data(), f)
410 410 for p in parents(f):
411 411 if p != nullid:
412 412 curr = pair(hist[p], curr)
413 413 # trim the history of unneeded revs
414 414 needed[p] -= 1
415 415 if not needed[p]:
416 416 del hist[p]
417 417 hist[f] = curr
418 418
419 419 return zip(hist[f][0], hist[f][1].splitlines(1))
420 420
421 421 def ancestor(self, fc2):
422 422 """
423 423 find the common ancestor file context, if any, of self, and fc2
424 424 """
425 425
426 426 acache = {}
427 427
428 428 # prime the ancestor cache for the working directory
429 429 for c in (self, fc2):
430 430 if c._filerev == None:
431 431 pl = [(n.path(), n.filenode()) for n in c.parents()]
432 432 acache[(c._path, None)] = pl
433 433
434 434 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
435 435 def parents(vertex):
436 436 if vertex in acache:
437 437 return acache[vertex]
438 438 f, n = vertex
439 439 if f not in flcache:
440 440 flcache[f] = self._repo.file(f)
441 441 fl = flcache[f]
442 442 pl = [(f, p) for p in fl.parents(n) if p != nullid]
443 443 re = fl.renamed(n)
444 444 if re:
445 445 pl.append(re)
446 446 acache[vertex] = pl
447 447 return pl
448 448
449 449 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
450 450 v = ancestor.ancestor(a, b, parents)
451 451 if v:
452 452 f, n = v
453 453 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
454 454
455 455 return None
456 456
457 457 class workingctx(changectx):
458 458 """A workingctx object makes access to data related to
459 459 the current working directory convenient.
460 460 parents - a pair of parent nodeids, or None to use the dirstate.
461 461 date - any valid date string or (unixtime, offset), or None.
462 462 user - username string, or None.
463 463 extra - a dictionary of extra values, or None.
464 464 changes - a list of file lists as returned by localrepo.status()
465 465 or None to use the repository status.
466 466 """
467 467 def __init__(self, repo, parents=None, text="", user=None, date=None,
468 468 extra=None, changes=None):
469 469 self._repo = repo
470 470 self._rev = None
471 471 self._node = None
472 472 self._text = text
473 473 if date:
474 474 self._date = util.parsedate(date)
475 475 if user:
476 476 self._user = user
477 477 if parents:
478 478 self._parents = [changectx(self._repo, p) for p in parents]
479 479 if changes:
480 480 self._status = list(changes)
481 481
482 482 self._extra = {}
483 483 if extra:
484 484 self._extra = extra.copy()
485 485 if 'branch' not in self._extra:
486 486 branch = self._repo.dirstate.branch()
487 487 try:
488 488 branch = branch.decode('UTF-8').encode('UTF-8')
489 489 except UnicodeDecodeError:
490 490 raise util.Abort(_('branch name not in UTF-8!'))
491 491 self._extra['branch'] = branch
492 492 if self._extra['branch'] == '':
493 493 self._extra['branch'] = 'default'
494 494
495 495 def __str__(self):
496 496 return str(self._parents[0]) + "+"
497 497
498 498 def __nonzero__(self):
499 499 return True
500 500
501 501 def __contains__(self, key):
502 502 return self._dirstate[key] not in "?r"
503 503
    def __getattr__(self, name):
        """Compute lazy attributes on first access.

        Attributes not pre-set by __init__ are derived from the repo the
        first time they are read, then cached on the instance so this
        hook is not entered again for them.
        """
        if name == '_status':
            # full working-directory status, including unknown files
            self._status = self._repo.status(unknown=True)
            return self._status
        elif name == '_user':
            self._user = self._repo.ui.username()
            return self._user
        elif name == '_date':
            self._date = util.makedate()
            return self._date
        if name == '_manifest':
            self._buildmanifest()
            return self._manifest
        elif name == '_parents':
            p = self._repo.dirstate.parents()
            if p[1] == nullid:
                # drop the null second parent when not merging
                p = p[:-1]
            self._parents = [changectx(self._repo, x) for x in p]
            return self._parents
        else:
            raise AttributeError(name)
525 525
    def _buildmanifest(self):
        """generate a manifest corresponding to the working directory"""

        # start from the first parent's manifest, then overlay local state
        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        # flags of the copy source, for files that were copied
        cf = lambda x: man.flags(copied.get(x, x))
        ff = self._repo.dirstate.flagfunc(cf)
        modified, added, removed, deleted, unknown = self._status[:5]
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                # tag the node with a state suffix; a copied file starts
                # from the source file's node, a new file from nullid
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    # file vanished from disk; keep the manifest entry
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        self._manifest = man
547 547
548 548 def manifest(self): return self._manifest
549 549
550 550 def user(self): return self._user or self._repo.ui.username()
551 551 def date(self): return self._date
552 552 def description(self): return self._text
553 553 def files(self):
554 554 return util.sort(self._status[0] + self._status[1] + self._status[2])
555 555
556 556 def modified(self): return self._status[0]
557 557 def added(self): return self._status[1]
558 558 def removed(self): return self._status[2]
559 559 def deleted(self): return self._status[3]
560 560 def unknown(self): return self._status[4]
561 561 def clean(self): return self._status[5]
562 562 def branch(self): return self._extra['branch']
563 563 def extra(self): return self._extra
564 564
565 565 def tags(self):
566 566 t = []
567 567 [t.extend(p.tags()) for p in self.parents()]
568 568 return t
569 569
570 570 def children(self):
571 571 return []
572 572
    def flags(self, path):
        """Return the flags ('l', 'x', or '') for 'path' in the workdir."""
        if '_manifest' in self.__dict__:
            # the working manifest was already built; trust it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        # otherwise consult the first parent's manifest plus on-disk state
        pnode = self._parents[0].changeset()[0]
        orig = self._repo.dirstate.copies().get(path, path)
        node, flag = self._repo.manifest.find(pnode, orig)
        try:
            ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
            return ff(path)
        except OSError:
            # file missing on disk; fall through to manifest flags
            pass

        if not node or path in self.deleted() or path in self.removed():
            return ''
        return flag
592 592
593 593 def filectx(self, path, filelog=None):
594 594 """get a file context from the working directory"""
595 595 return workingfilectx(self._repo, path, workingctx=self,
596 596 filelog=filelog)
597 597
598 598 def ancestor(self, c2):
599 599 """return the ancestor context of self and c2"""
600 600 return self._parents[0].ancestor(c2) # punt on two parents for now
601 601
602 602 def walk(self, match):
603 603 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
604 604
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # only pre-set the lazy attributes when values were supplied;
        # __getattr__ fills in the rest on demand
        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    def __getattr__(self, name):
        # lazily computed attributes, cached on the instance
        if name == '_changectx':
            self._changectx = workingctx(self._repo)
            return self._changectx
        elif name == '_repopath':
            # follow a pending copy/rename back to its source path
            self._repopath = (self._repo.dirstate.copied(self._path)
                              or self._path)
            return self._repopath
        elif name == '_filelog':
            self._filelog = self._repo.file(self._repopath)
            return self._filelog
        else:
            raise AttributeError(name)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._repopath, fileid=fileid,
                       filelog=self._filelog)

    def rev(self):
        """Return the changelog revision this file belongs to."""
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        # linkrev takes a file revision number, not a node
        return self._filelog.linkrev(self._filerev)

    def data(self): return self._repo.wread(self._path)
    def renamed(self):
        """Return (source path, source node) for a pending copy, or None."""
        rp = self._repopath
        if rp == self._path:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        p = self._path
        rp = self._repopath
        pcl = self._changectx._parents
        fl = self._filelog
        pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
        if len(pcl) > 1:
            if rp != p:
                # renamed relative to the first parent only; the second
                # parent's copy lives under the original name and may be
                # in a different filelog
                fl = None
            pl.append((p, pcl[1]._manifest.get(p, nullid), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        return []

    def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
    def date(self):
        """Return (file mtime, changectx tz), or the changectx date if
        the file has been removed from disk."""
        t, tz = self._changectx.date()
        try:
            return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT: raise
            return (t, tz)

    # NOTE: returns True when the on-disk contents EQUAL 'text'
    def cmp(self, text): return self._repo.wread(self._path) == text
687 687
class memctx(object):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        # normalize missing parents to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = util.sort(list(files))
        # status layout: [modified, added, removed, deleted, unknown, clean]
        # (previously only five entries, so clean() raised IndexError)
        self._status = [files, [], [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if 'branch' not in self._extra:
            self._extra['branch'] = 'default'
        elif self._extra.get('branch') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        # an in-memory commit context always "exists"
        return True

    def user(self): return self._user or self._repo.ui.username()
    def date(self): return self._date
    def description(self): return self._text
    def files(self): return self.modified()
    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self): return self._extra['branch']
    def extra(self): return self._extra
    def flags(self, f): return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)
766 766
class memfilectx(object):
    """memfilectx represents an in-memory file to commit.

    See memctx for more details.
    """
    def __init__(self, path, data, islink, isexec, copied):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        self._path = path
        self._data = data
        # build the flag string: 'l' before 'x', empty when neither
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def __nonzero__(self):
        return True

    def __str__(self):
        # NOTE(review): nothing in this class assigns self._changectx, so
        # this looks like it would raise AttributeError — confirm callers.
        return "%s@%s" % (self.path(), self._changectx)

    def path(self):
        return self._path

    def data(self):
        return self._data

    def flags(self):
        return self._flags

    def isexec(self):
        return 'x' in self._flags

    def islink(self):
        return 'l' in self._flags

    def renamed(self):
        return self._copied
795 795
@@ -1,657 +1,657 b''
1 1 #
2 2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, mimetypes, re, cgi, copy
9 9 import webutil
10 10 from mercurial import revlog, archival, templatefilters
11 11 from mercurial.node import short, hex, nullid
12 12 from mercurial.util import binary, datestr
13 13 from mercurial.repo import RepoError
14 14 from common import paritygen, staticfile, get_contact, ErrorResponse
15 15 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
16 16 from mercurial import graphmod, util
17 17
18 18 # __all__ is populated with the allowed commands. Be sure to add to it if
19 19 # you're adding a new command, or the new command won't work.
20 20
21 21 __all__ = [
22 22 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
23 23 'manifest', 'tags', 'summary', 'filediff', 'diff', 'annotate', 'filelog',
24 24 'archive', 'static', 'graph',
25 25 ]
26 26
def log(web, req, tmpl):
    """Dispatch to the file log when a file was requested, otherwise
    to the changelog."""
    requested = req.form.get('file', [None])[0]
    if requested:
        return filelog(web, req, tmpl)
    return changelog(web, req, tmpl)
32 32
def rawfile(web, req, tmpl):
    """Serve the raw contents of a file at the requested revision.

    Falls back to the manifest (directory) view when no path was given
    or the path does not resolve to a file in that revision.
    """
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        content = manifest(web, req, tmpl)
        req.respond(HTTP_OK, web.ctype)
        return content

    try:
        fctx = webutil.filectx(web.repo, req)
    except revlog.LookupError, inst:
        try:
            # the path may name a directory; try the manifest view
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            # not a directory either: report the original lookup error
            raise inst

    path = fctx.path()
    text = fctx.data()
    # guess MIME type from the name, defaulting by content kind
    mt = mimetypes.guess_type(path)[0]
    if mt is None:
        mt = binary(text) and 'application/octet-stream' or 'text/plain'

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]
58 58
def _filerevision(web, tmpl, fctx):
    """Render the "filerevision" template for the given file context."""
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        # never dump raw binary data; show a placeholder instead
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        # one mapping per source line, for the template's line loop
        for lineno, t in enumerate(text.splitlines(1)):
            yield {"line": t,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "parity": parity.next()}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=lines(),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.siblings(fctx.parents()),
                child=webutil.siblings(fctx.children()),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f))
89 89
def file(web, req, tmpl):
    """Show a file revision, or the manifest when no path was given or
    the path turns out to be a directory."""
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        return manifest(web, req, tmpl)
    try:
        return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
    except revlog.LookupError, inst:
        try:
            # the path may name a directory
            return manifest(web, req, tmpl)
        except ErrorResponse:
            # neither file nor directory: report the original error
            raise inst
101 101
def _search(web, tmpl, query):
    """Render the "search" template: changesets matching all query words.

    A changeset matches when every whitespace-separated word of the
    query occurs (case-insensitively) in its user, description, or file
    list.  The scan walks backwards from tip and stops after
    web.maxchanges hits.
    """

    def changelist(**map):
        cl = web.repo.changelog
        count = 0
        qw = query.lower().split()

        def revgen():
            # walk the changelog from tip towards 0 in batches of 100,
            # yielding each batch in descending revision order
            for i in xrange(len(cl) - 1, 0, -100):
                l = []
                for j in xrange(max(0, i - 100), i + 1):
                    ctx = web.repo[j]
                    l.append(ctx)
                l.reverse()
                for e in l:
                    yield e

        for ctx in revgen():
            miss = 0
            for q in qw:
                # every query word must match somewhere
                if not (q in ctx.user().lower() or
                        q in ctx.description().lower() or
                        q in " ".join(ctx.files()).lower()):
                    miss = 1
                    break
            if miss:
                continue

            count += 1
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            yield tmpl('searchentry',
                       parity=parity.next(),
                       author=ctx.user(),
                       parent=webutil.siblings(ctx.parents()),
                       child=webutil.siblings(ctx.children()),
                       changelogtag=showtags,
                       desc=ctx.description(),
                       date=ctx.date(),
                       files=files,
                       rev=ctx.rev(),
                       node=hex(n),
                       tags=webutil.nodetagsdict(web.repo, n),
                       inbranch=webutil.nodeinbranch(web.repo, ctx),
                       branches=webutil.nodebranchdict(web.repo, ctx))

            if count >= web.maxchanges:
                break

    cl = web.repo.changelog
    parity = paritygen(web.stripecount)

    return tmpl('search',
                query=query,
                node=hex(cl.tip()),
                entries=changelist,
                archives=web.archivelist("tip"))
161 161
def changelog(web, req, tmpl, shortlog = False):
    """Render the changelog (or shortlog) page.

    The revision to center on comes from the 'node' or 'rev' form
    field; an unresolvable 'rev' value falls through to a free-text
    search instead.
    """
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
    else:
        if 'rev' in req.form:
            hi = req.form['rev'][0]
        else:
            hi = len(web.repo) - 1
        try:
            ctx = web.repo[hi]
        except RepoError:
            return _search(web, tmpl, hi) # XXX redirect to 404 page?

    def changelist(limit=0, **map):
        # closes over start/end/parity computed below
        cl = web.repo.changelog
        l = [] # build a list in forward order for efficiency
        for i in xrange(start, end):
            ctx = web.repo[i]
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            l.insert(0, {"parity": parity.next(),
                         "author": ctx.user(),
                         "parent": webutil.siblings(ctx.parents(), i - 1),
                         "child": webutil.siblings(ctx.children(), i + 1),
                         "changelogtag": showtags,
                         "desc": ctx.description(),
                         "date": ctx.date(),
                         "files": files,
                         "rev": i,
                         "node": hex(n),
                         "tags": webutil.nodetagsdict(web.repo, n),
                         "inbranch": webutil.nodeinbranch(web.repo, ctx),
                         "branches": webutil.nodebranchdict(web.repo, ctx)
                        })

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    maxchanges = shortlog and web.maxshortchanges or web.maxchanges
    cl = web.repo.changelog
    count = len(cl)
    pos = ctx.rev()
    start = max(0, pos - maxchanges + 1)
    end = min(count, start + maxchanges)
    pos = end - 1
    parity = paritygen(web.stripecount, offset=start-end)

    changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx)

    return tmpl(shortlog and 'shortlog' or 'changelog',
                changenav=changenav,
                node=hex(ctx.node()),
                rev=pos, changesets=count,
                entries=lambda **x: changelist(limit=0,**x),
                latestentry=lambda **x: changelist(limit=1,**x),
                archives=web.archivelist("tip"))
223 223
def shortlog(web, req, tmpl):
    """Render the changelog in its abbreviated (short) form."""
    return changelog(web, req, tmpl, shortlog = True)
226 226
def changeset(web, req, tmpl):
    """Render the "changeset" page for a single revision."""
    ctx = webutil.changectx(web.repo, req)
    showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
    parents = ctx.parents()

    files = []
    parity = paritygen(web.stripecount)
    for f in ctx.files():
        # files listed by the changeset but absent from its manifest
        # (i.e. removed) get a link-less entry
        template = f in ctx and 'filenodelink' or 'filenolink'
        files.append(tmpl(template,
                          node=ctx.hex(), file=f,
                          parity=parity.next()))

    parity = paritygen(web.stripecount)
    diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity)
    return tmpl('changeset',
                diff=diffs,
                rev=ctx.rev(),
                node=ctx.hex(),
                parent=webutil.siblings(parents),
                child=webutil.siblings(ctx.children()),
                changesettag=showtags,
                author=ctx.user(),
                desc=ctx.description(),
                date=ctx.date(),
                files=files,
                archives=web.archivelist(ctx.hex()),
                tags=webutil.nodetagsdict(web.repo, ctx.node()),
                branch=webutil.nodebranchnodefault(ctx),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))

# 'rev' is served as an alias for the changeset view
rev = changeset
260 260
def manifest(web, req, tmpl):
    """Render a directory listing for a path at a given revision.

    Builds maps of the files and (possibly collapsed) sub-directories
    directly under 'path', raising 404 when the path matches nothing
    in the manifest.
    """
    ctx = webutil.changectx(web.repo, req)
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for f, n in mf.items():
        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            # direct child file of 'path'
            files[remain] = f
        else:
            # record the sub-directory chain leading to this file
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        for f in util.sort(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": parity.next(),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        for d in util.sort(dirs):

            # collapse chains of single-entry directories into one path
            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k,v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": parity.next(),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                rev=ctx.rev(),
                node=hex(node),
                path=abspath,
                up=webutil.up(abspath),
                upparity=parity.next(),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                tags=webutil.nodetagsdict(web.repo, node),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))
337 337
def tags(web, req, tmpl):
    """Render the "tags" page listing all tags, newest first."""
    i = web.repo.tagslist()
    i.reverse()
    parity = paritygen(web.stripecount)

    def entries(notip=False,limit=0, **map):
        # notip skips the implicit "tip" tag; limit>0 caps the output
        count = 0
        for k, n in i:
            if notip and k == "tip":
                continue
            if limit > 0 and count >= limit:
                continue
            count = count + 1
            yield {"parity": parity.next(),
                   "tag": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False,0, **x),
                entriesnotip=lambda **x: entries(True,0, **x),
                latestentry=lambda **x: entries(True,1, **x))
361 361
def summary(web, req, tmpl):
    """Render the repository "summary" page: recent tags, branch heads,
    and the latest changesets."""
    i = web.repo.tagslist()
    i.reverse()

    def tagentries(**map):
        parity = paritygen(web.stripecount)
        count = 0
        for k, n in i:
            if k == "tip": # skip tip
                continue

            count += 1
            if count > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=parity.next(),
                       tag=k,
                       node=hex(n),
                       date=web.repo[n].date())

    def branches(**map):
        parity = paritygen(web.stripecount)

        b = web.repo.branchtags()
        # negated revision number sorts branches newest-first
        l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.items()]
        for r,n,t in util.sort(l):
            yield {'parity': parity.next(),
                   'branch': t,
                   'node': hex(n),
                   'date': web.repo[n].date()}

    def changelist(**map):
        parity = paritygen(web.stripecount, offset=start-end)
        l = [] # build a list in forward order for efficiency
        for i in xrange(start, end):
            ctx = web.repo[i]
            n = ctx.node()
            hn = hex(n)

            l.insert(0, tmpl(
                'shortlogentry',
                parity=parity.next(),
                author=ctx.user(),
                desc=ctx.description(),
                date=ctx.date(),
                rev=i,
                node=hn,
                tags=webutil.nodetagsdict(web.repo, n),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx)))

        yield l

    cl = web.repo.changelog
    count = len(cl)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=cl.read(cl.tip())[2],
                tags=tagentries,
                branches=branches,
                shortlog=changelist,
                node=hex(cl.tip()),
                archives=web.archivelist("tip"))
430 430
def filediff(web, req, tmpl):
    """Render the "filediff" page: the diff of one file in a changeset."""
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, req)
    except LookupError:
        # No filenode for this path: fall back to the changeset, but only
        # if the file was actually touched (e.g. removed) by it.
        # NOTE(review): this catches the builtin LookupError while other
        # handlers in this module catch revlog.LookupError — confirm the
        # latter is a subclass of the former.
        ctx = webutil.changectx(web.repo, req)
        path = webutil.cleanpath(web.repo, req.form['file'][0])
        if path not in ctx.files():
            raise

    if fctx is not None:
        n = fctx.node()
        path = fctx.path()
        parents = fctx.parents()
        p1 = parents and parents[0].node() or nullid
    else:
        n = ctx.node()
        # path already defined in except clause
        parents = ctx.parents()

    parity = paritygen(web.stripecount)
    diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity)
    rename = fctx and webutil.renamelink(fctx) or []
    ctx = fctx and fctx or ctx
    return tmpl("filediff",
                file=path,
                node=hex(n),
                rev=ctx.rev(),
                date=ctx.date(),
                desc=ctx.description(),
                author=ctx.user(),
                rename=rename,
                branch=webutil.nodebranchnodefault(ctx),
                parent=webutil.siblings(parents),
                child=webutil.siblings(ctx.children()),
                diff=diffs)

# 'diff' is served as an alias for the filediff view
diff = filediff
469 469
def annotate(web, req, tmpl):
    """Render the "fileannotate" page: per-line blame for a file."""
    fctx = webutil.filectx(web.repo, req)
    f = fctx.path()
    parity = paritygen(web.stripecount)

    def annotate(**map):
        last = None
        if binary(fctx.data()):
            # binary files get a single placeholder "line"
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            # track the filenode of the previous line (same-origin runs)
            if last != fnode:
                last = fnode

            yield {"parity": parity.next(),
                   "node": hex(f.node()),
                   "rev": f.rev(),
                   "author": f.user(),
                   "desc": f.description(),
                   "file": f.path(),
                   "targetline": targetline,
                   "line": l,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1)}

    return tmpl("fileannotate",
                file=f,
                annotate=annotate,
                path=webutil.up(f),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                rename=webutil.renamelink(fctx),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.siblings(fctx.parents()),
                child=webutil.siblings(fctx.children()),
                permissions=fctx.manifest().flags(f))
515 515
def filelog(web, req, tmpl):
    """Render the "filelog" page: the revision history of one file."""

    try:
        fctx = webutil.filectx(web.repo, req)
        f = fctx.path()
        fl = fctx.filelog()
    except revlog.LookupError:
        # the file does not exist at the requested revision; fall back
        # to its newest file revision linked at or before that revision
        f = webutil.cleanpath(web.repo, req.form['file'][0])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        # linkrev takes a file revision number and returns the changelog
        # revision that introduced it
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    count = fctx.filerev() + 1
    pagelen = web.maxshortchanges
    start = max(0, fctx.filerev() - pagelen + 1) # first rev on this page
    end = min(count, start + pagelen) # last rev on this page
    parity = paritygen(web.stripecount, offset=start-end)

    def entries(limit=0, **map):
        l = []

        for i in xrange(start, end):
            ctx = fctx.filectx(i)

            l.insert(0, {"parity": parity.next(),
                         "filerev": i,
                         "file": f,
                         "node": hex(ctx.node()),
                         "author": ctx.user(),
                         "date": ctx.date(),
                         "rename": webutil.renamelink(fctx),
                         "parent": webutil.siblings(fctx.parents()),
                         "child": webutil.siblings(fctx.children()),
                         "desc": ctx.description()})

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    nodefunc = lambda x: fctx.filectx(fileid=x)
    nav = webutil.revnavgen(end - 1, pagelen, count, nodefunc)
    return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav,
                entries=lambda **x: entries(limit=0, **x),
                latestentry=lambda **x: entries(limit=1, **x))
571 571
572 572
def archive(web, req, tmpl):
    """Stream an archive (zip, tar, ...) of the requested revision.

    The archive type must be known and enabled in the configuration;
    otherwise a 404 or 403 error response is raised.
    """
    type_ = req.form.get('type', [None])[0]
    allowed = web.configlist("web", "allow_archive")
    key = req.form['node'][0]

    if type_ not in web.archives:
        msg = 'Unsupported archive type: %s' % type_
        raise ErrorResponse(HTTP_NOT_FOUND, msg)

    if not ((type_ in allowed or
             web.configbool("web", "allow" + type_, False))):
        msg = 'Archive type not allowed: %s' % type_
        raise ErrorResponse(HTTP_FORBIDDEN, msg)

    # sanitize the repo name for use in a filename
    reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
    cnode = web.repo.lookup(key)
    arch_version = key
    if cnode == key or key == 'tip':
        # prefer the short hash over a raw node or keyword
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)
    mimetype, artype, extension, encoding = web.archive_specs[type_]
    headers = [
        ('Content-Type', mimetype),
        ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
    ]
    if encoding:
        headers.append(('Content-Encoding', encoding))
    req.header(headers)
    req.respond(HTTP_OK)
    archival.archive(web.repo, req, cnode, artype, prefix=name)
    return []
604 604
605 605
def static(web, req, tmpl):
    """Serve a static file (CSS, images, ...) for the web interface."""
    fname = req.form['file'][0]
    # a repo owner may set web.static in .hg/hgrc to get any file
    # readable by the user running the CGI script
    roots = web.config("web", "static", None, untrusted=False)
    if not roots:
        # default to the 'static' directory beside the templates
        paths = web.templatepath
        if isinstance(paths, str):
            paths = [paths]
        roots = [os.path.join(p, 'static') for p in paths]
    return [staticfile(roots, fname, req)]
617 617
def graph(web, req, tmpl):
    """Render the "graph" page: revision graph data for the JS canvas."""
    rev = webutil.changectx(web.repo, req).rev()
    bg_height = 39

    revcount = 25
    if 'revcount' in req.form:
        # remember the user's chosen page size in the session vars
        revcount = int(req.form.get('revcount', [revcount])[0])
        tmpl.defaults['sessionvars']['revcount'] = revcount

    # links for showing half / twice as many revisions
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = revcount / 2
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    max_rev = len(web.repo) - 1
    revcount = min(max_rev, revcount)
    revnode = web.repo.changelog.node(rev)
    revnode_hex = hex(revnode)
    uprev = min(max_rev, rev + revcount)
    downrev = max(0, rev - revcount)
    count = len(web.repo)
    changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)

    tree = list(graphmod.graph(web.repo, rev, downrev))
    canvasheight = (len(tree) + 1) * bg_height - 27;
    data = []
    for i, (ctx, vtx, edges) in enumerate(tree):
        node = short(ctx.node())
        age = templatefilters.age(ctx.date())
        desc = templatefilters.firstline(ctx.description())
        desc = cgi.escape(desc)
        user = cgi.escape(templatefilters.person(ctx.user()))
        branch = ctx.branch()
        # pair the branch name with whether this node is its tip
        branch = branch, web.repo.branchtags().get(branch) == ctx.node()
        data.append((node, vtx, edges, desc, user, age, branch, ctx.tags()))

    return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
                lessvars=lessvars, morevars=morevars, downrev=downrev,
                canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
                node=revnode_hex, changenav=changenav)
@@ -1,214 +1,214 b''
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, copy
10 10 from mercurial import match, patch
11 11 from mercurial.node import hex, nullid
12 12 from mercurial.repo import RepoError
13 13 from mercurial import util
14 14
def up(p):
    """Return the parent directory of path *p* in the canonical web form
    "/dir/" (leading slash added, trailing slash guaranteed)."""
    if p[0] != "/":
        p = "/" + p
    if p[-1] == "/":
        p = p[:-1]
    parent = os.path.dirname(p)
    if parent == "/":
        return "/"
    return parent + "/"
24 24
def revnavgen(pos, pagelen, limit, nodefunc):
    """Build a template generator producing revision-navigation links.

    pos: current revision number; pagelen: revisions shown per page;
    limit: total revision count; nodefunc: maps a revision number (or
    the string '0') to a context object exposing .node().

    Returns a generator function yielding {"label", "node"} dicts for
    "-N"/"+N" jump links around *pos*, bracketed by fixed "(0)" and
    "tip" entries.
    """
    def seq(factor, limit=None):
        # Yield an increasing sequence of jump distances (1, 3, 10, 30,
        # 100, ...).  When a page-length limit is supplied it is emitted
        # first, with values in 20..40 also yielding 50 for a rounder
        # label.  NOTE: the inner `limit` parameter shadows the outer
        # argument deliberately; recursion passes no limit.
        if limit:
            yield limit
            if limit >= 20 and limit <= 40:
                yield 50
        else:
            yield 1 * factor
        yield 3 * factor
        for f in seq(factor * 10):
            yield f

    def nav(**map):
        l = []
        last = 0
        for f in seq(1, pagelen):
            # skip distances smaller than a page or already emitted
            if f < pagelen or f <= last:
                continue
            if f > limit:
                break
            last = f
            # forward links append, backward links prepend, so l reads
            # -big ... -small, +small ... +big
            if pos + f < limit:
                l.append(("+%d" % f, hex(nodefunc(pos + f).node())))
            if pos - f >= 0:
                l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))

        try:
            yield {"label": "(0)", "node": hex(nodefunc('0').node())}

            for label, node in l:
                yield {"label": label, "node": node}

            yield {"label": "tip", "node": "tip"}
        except RepoError:
            # e.g. empty repository: emit nothing rather than fail
            pass

    return nav
62 62
def siblings(siblings=None, hiderev=None, **args):
    """Yield one template dict per sibling changeset.

    Null-node siblings are dropped, and a single remaining sibling
    whose revision equals *hiderev* is suppressed entirely.  Extra
    keyword arguments are merged into every yielded dict.

    Fix: the original used a mutable default argument (``siblings=[]``);
    replaced with the None-sentinel idiom.  Behavior is unchanged for
    all callers.
    """
    if siblings is None:
        siblings = []
    siblings = [s for s in siblings if s.node() != nullid]
    if len(siblings) == 1 and siblings[0].rev() == hiderev:
        return
    for s in siblings:
        d = {'node': hex(s.node()), 'rev': s.rev()}
        d['user'] = s.user()
        d['date'] = s.date()
        d['description'] = s.description()
        # file contexts additionally carry their path
        if hasattr(s, 'path'):
            d['file'] = s.path()
        d.update(args)
        yield d
76 76
def renamelink(fctx):
    """Return a one-element template list describing the rename source
    of *fctx*, or an empty list when the file context is not a rename."""
    rename = fctx.renamed()
    if not rename:
        return []
    return [dict(file=rename[0], node=hex(rename[1]))]
82 82
def nodetagsdict(repo, node):
    """Map each tag attached to *node* onto a {"name": tag} dict."""
    return [{"name": t} for t in repo.nodetags(node)]
85 85
def nodebranchdict(repo, ctx):
    """Return [{"name": branch}] when *ctx* is the tip of its branch,
    else an empty list.

    On an empty repo ctx.node() is nullid and ctx.branch() is
    'default', but branchtags() is an empty dict; dict.get keeps that
    case from raising."""
    branch = ctx.branch()
    if repo.branchtags().get(branch) == ctx.node():
        return [{"name": branch}]
    return []
95 95
def nodeinbranch(repo, ctx):
    """Return [{"name": branch}] for a non-default branch unless *ctx*
    is that branch's tip (the tip case is handled by nodebranchdict)."""
    branch = ctx.branch()
    if branch == 'default' or repo.branchtags().get(branch) == ctx.node():
        return []
    return [{"name": branch}]
102 102
def nodebranchnodefault(ctx):
    """Return [{"name": branch}] unless *ctx* sits on the default branch."""
    branch = ctx.branch()
    if branch == 'default':
        return []
    return [{"name": branch}]
109 109
def showtag(repo, tmpl, t1, node=nullid, **args):
    """Expand template *t1* once for every tag attached to *node*."""
    for tag in repo.nodetags(node):
        yield tmpl(t1, tag=tag, **args)
113 113
def cleanpath(repo, path):
    """Strip leading slashes from *path* and canonicalize it relative
    to the repository root."""
    return util.canonpath(repo.root, '', path.lstrip('/'))
117 117
def changectx(repo, req):
    """Resolve the changeset named by the request's 'node' or
    'manifest' form field (defaulting to "tip") to a change context.

    An identifier unknown to the changelog is retried as a manifest
    node and mapped back to its changeset through linkrev."""
    changeid = "tip"
    # 'node' takes precedence over 'manifest'
    for field in ('node', 'manifest'):
        if field in req.form:
            changeid = req.form[field][0]
            break

    try:
        ctx = repo[changeid]
    except RepoError:
        man = repo.manifest
        ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]

    return ctx
132 132
def filectx(repo, req):
    """Resolve the request to a file context.

    The path comes from the 'file' form field; the revision from
    'node' (preferred) or 'filenode'.  An identifier unknown to the
    changelog is retried as a file revision id."""
    path = cleanpath(repo, req.form['file'][0])
    if 'node' in req.form:
        changeid = req.form['node'][0]
    else:
        changeid = req.form['filenode'][0]
    try:
        return repo[changeid][path]
    except RepoError:
        return repo.filectx(path, fileid=changeid)
145 145
def listfilediffs(tmpl, files, node, max):
    """Yield a 'filedifflink' entry for at most *max* files, followed
    by a single 'fileellipses' entry when the list was truncated."""
    for filename in files[:max]:
        yield tmpl('filedifflink', node=hex(node), file=filename)
    if len(files) > max:
        yield tmpl('fileellipses')
151 151
def diffs(repo, tmpl, ctx, files, parity):
    """Yield one 'diffblock' template entry per file diff of *ctx*
    against its first parent, restricted to *files* when given.

    Each block's lines are lazily classified (plus/minus/at/plain) and
    numbered "block.line" for anchor ids; *parity* alternates row
    shading."""

    def countgen():
        # infinite 1, 2, 3, ... counter for block numbering
        start = 1
        while True:
            yield start
            start += 1

    blockcount = countgen()
    def prettyprintlines(diff):
        # NOTE: the block number is consumed when this generator is
        # first iterated (by the template), not when it is created
        blockno = blockcount.next()
        for lineno, l in enumerate(diff.splitlines(True)):
            lineno = "%d.%d" % (blockno, lineno + 1)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineid="l%s" % lineno,
                       linenumber="% 8s" % lineno)

    # restrict the diff to the requested files, or match everything
    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    parents = ctx.parents()
    node1 = parents and parents[0].node() or nullid
    node2 = ctx.node()

    # accumulate chunks into per-file blocks: a chunk starting with
    # 'diff' opens a new file, flushing the previous block first; the
    # 'diff ...' header line itself is stripped from the block body
    block = []
    for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
        if chunk.startswith('diff') and block:
            yield tmpl('diffblock', parity=parity.next(),
                       lines=prettyprintlines(''.join(block)))
            block = []
        if chunk.startswith('diff'):
            chunk = ''.join(chunk.splitlines(True)[1:])
        block.append(chunk)
    # flush the final block
    yield tmpl('diffblock', parity=parity.next(),
               lines=prettyprintlines(''.join(block)))
199 199
class sessionvars(object):
    """A bag of URL query parameters carried from page to page.

    Iteration yields template dicts where the first parameter gets the
    configured start separator (default '?') and subsequent ones '&'.
    """
    def __init__(self, vars, start='?'):
        self.start = start
        self.vars = vars

    def __getitem__(self, key):
        return self.vars[key]

    def __setitem__(self, key, value):
        self.vars[key] = value

    def __copy__(self):
        # shallow-copy the mapping so copies diverge independently
        return sessionvars(copy.copy(self.vars), self.start)

    def __iter__(self):
        sep = self.start
        for name, val in self.vars.iteritems():
            yield {'name': name, 'value': str(val), 'separator': sep}
            sep = '&'
@@ -1,2126 +1,2125 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store', 'fncache')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 if parentui.configbool('format', 'usefncache', True):
39 39 requirements.append("fncache")
40 40 # create an invalid changelog
41 41 self.opener("00changelog.i", "a").write(
42 42 '\0\0\0\2' # represents revlogv2
43 43 ' dummy changelog to prevent using the old repo layout'
44 44 )
45 45 reqfile = self.opener("requires", "w")
46 46 for r in requirements:
47 47 reqfile.write("%s\n" % r)
48 48 reqfile.close()
49 49 else:
50 50 raise repo.RepoError(_("repository %s not found") % path)
51 51 elif create:
52 52 raise repo.RepoError(_("repository %s already exists") % path)
53 53 else:
54 54 # find requirements
55 55 requirements = []
56 56 try:
57 57 requirements = self.opener("requires").read().splitlines()
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64
65 65 self.store = store.store(requirements, self.path, util.opener)
66 66 self.spath = self.store.path
67 67 self.sopener = self.store.opener
68 68 self.sjoin = self.store.join
69 69 self.opener.createmode = self.store.createmode
70 70
71 71 self.ui = ui.ui(parentui=parentui)
72 72 try:
73 73 self.ui.readconfig(self.join("hgrc"), self.root)
74 74 extensions.loadall(self.ui)
75 75 except IOError:
76 76 pass
77 77
78 78 self.tagscache = None
79 79 self._tagstypecache = None
80 80 self.branchcache = None
81 81 self._ubranchcache = None # UTF-8 version of branchcache
82 82 self._branchcachetip = None
83 83 self.nodetagscache = None
84 84 self.filterpats = {}
85 85 self._datafilters = {}
86 86 self._transref = self._lockref = self._wlockref = None
87 87
88 88 def __getattr__(self, name):
89 89 if name == 'changelog':
90 90 self.changelog = changelog.changelog(self.sopener)
91 91 self.sopener.defversion = self.changelog.version
92 92 return self.changelog
93 93 if name == 'manifest':
94 94 self.changelog
95 95 self.manifest = manifest.manifest(self.sopener)
96 96 return self.manifest
97 97 if name == 'dirstate':
98 98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 99 return self.dirstate
100 100 else:
101 101 raise AttributeError(name)
102 102
103 103 def __getitem__(self, changeid):
104 104 if changeid == None:
105 105 return context.workingctx(self)
106 106 return context.changectx(self, changeid)
107 107
108 108 def __nonzero__(self):
109 109 return True
110 110
111 111 def __len__(self):
112 112 return len(self.changelog)
113 113
114 114 def __iter__(self):
115 115 for i in xrange(len(self)):
116 116 yield i
117 117
118 118 def url(self):
119 119 return 'file:' + self.root
120 120
121 121 def hook(self, name, throw=False, **args):
122 122 return hook.hook(self.ui, self, name, throw, **args)
123 123
124 124 tag_disallowed = ':\r\n'
125 125
126 126 def _tag(self, names, node, message, local, user, date, parent=None,
127 127 extra={}):
128 128 use_dirstate = parent is None
129 129
130 130 if isinstance(names, str):
131 131 allchars = names
132 132 names = (names,)
133 133 else:
134 134 allchars = ''.join(names)
135 135 for c in self.tag_disallowed:
136 136 if c in allchars:
137 137 raise util.Abort(_('%r cannot be used in a tag name') % c)
138 138
139 139 for name in names:
140 140 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 141 local=local)
142 142
143 143 def writetags(fp, names, munge, prevtags):
144 144 fp.seek(0, 2)
145 145 if prevtags and prevtags[-1] != '\n':
146 146 fp.write('\n')
147 147 for name in names:
148 148 m = munge and munge(name) or name
149 149 if self._tagstypecache and name in self._tagstypecache:
150 150 old = self.tagscache.get(name, nullid)
151 151 fp.write('%s %s\n' % (hex(old), m))
152 152 fp.write('%s %s\n' % (hex(node), m))
153 153 fp.close()
154 154
155 155 prevtags = ''
156 156 if local:
157 157 try:
158 158 fp = self.opener('localtags', 'r+')
159 159 except IOError, err:
160 160 fp = self.opener('localtags', 'a')
161 161 else:
162 162 prevtags = fp.read()
163 163
164 164 # local tags are stored in the current charset
165 165 writetags(fp, names, None, prevtags)
166 166 for name in names:
167 167 self.hook('tag', node=hex(node), tag=name, local=local)
168 168 return
169 169
170 170 if use_dirstate:
171 171 try:
172 172 fp = self.wfile('.hgtags', 'rb+')
173 173 except IOError, err:
174 174 fp = self.wfile('.hgtags', 'ab')
175 175 else:
176 176 prevtags = fp.read()
177 177 else:
178 178 try:
179 179 prevtags = self.filectx('.hgtags', parent).data()
180 180 except revlog.LookupError:
181 181 pass
182 182 fp = self.wfile('.hgtags', 'wb')
183 183 if prevtags:
184 184 fp.write(prevtags)
185 185
186 186 # committed tags are stored in UTF-8
187 187 writetags(fp, names, util.fromlocal, prevtags)
188 188
189 189 if use_dirstate and '.hgtags' not in self.dirstate:
190 190 self.add(['.hgtags'])
191 191
192 192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 193 extra=extra)
194 194
195 195 for name in names:
196 196 self.hook('tag', node=hex(node), tag=name, local=local)
197 197
198 198 return tagnode
199 199
200 200 def tag(self, names, node, message, local, user, date):
201 201 '''tag a revision with one or more symbolic names.
202 202
203 203 names is a list of strings or, when adding a single tag, names may be a
204 204 string.
205 205
206 206 if local is True, the tags are stored in a per-repository file.
207 207 otherwise, they are stored in the .hgtags file, and a new
208 208 changeset is committed with the change.
209 209
210 210 keyword arguments:
211 211
212 212 local: whether to store tags in non-version-controlled file
213 213 (default False)
214 214
215 215 message: commit message to use if committing
216 216
217 217 user: name of user to use if committing
218 218
219 219 date: date tuple to use if committing'''
220 220
221 221 for x in self.status()[:5]:
222 222 if '.hgtags' in x:
223 223 raise util.Abort(_('working copy of .hgtags is changed '
224 224 '(please commit .hgtags manually)'))
225 225
226 226 self._tag(names, node, message, local, user, date)
227 227
228 228 def tags(self):
229 229 '''return a mapping of tag to node'''
230 230 if self.tagscache:
231 231 return self.tagscache
232 232
233 233 globaltags = {}
234 234 tagtypes = {}
235 235
236 236 def readtags(lines, fn, tagtype):
237 237 filetags = {}
238 238 count = 0
239 239
240 240 def warn(msg):
241 241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242 242
243 243 for l in lines:
244 244 count += 1
245 245 if not l:
246 246 continue
247 247 s = l.split(" ", 1)
248 248 if len(s) != 2:
249 249 warn(_("cannot parse entry"))
250 250 continue
251 251 node, key = s
252 252 key = util.tolocal(key.strip()) # stored in UTF-8
253 253 try:
254 254 bin_n = bin(node)
255 255 except TypeError:
256 256 warn(_("node '%s' is not well formed") % node)
257 257 continue
258 258 if bin_n not in self.changelog.nodemap:
259 259 warn(_("tag '%s' refers to unknown node") % key)
260 260 continue
261 261
262 262 h = []
263 263 if key in filetags:
264 264 n, h = filetags[key]
265 265 h.append(n)
266 266 filetags[key] = (bin_n, h)
267 267
268 268 for k, nh in filetags.items():
269 269 if k not in globaltags:
270 270 globaltags[k] = nh
271 271 tagtypes[k] = tagtype
272 272 continue
273 273
274 274 # we prefer the global tag if:
275 275 # it supercedes us OR
276 276 # mutual supercedes and it has a higher rank
277 277 # otherwise we win because we're tip-most
278 278 an, ah = nh
279 279 bn, bh = globaltags[k]
280 280 if (bn != an and an in bh and
281 281 (bn not in ah or len(bh) > len(ah))):
282 282 an = bn
283 283 ah.extend([n for n in bh if n not in ah])
284 284 globaltags[k] = an, ah
285 285 tagtypes[k] = tagtype
286 286
287 287 # read the tags file from each head, ending with the tip
288 288 f = None
289 289 for rev, node, fnode in self._hgtagsnodes():
290 290 f = (f and f.filectx(fnode) or
291 291 self.filectx('.hgtags', fileid=fnode))
292 292 readtags(f.data().splitlines(), f, "global")
293 293
294 294 try:
295 295 data = util.fromlocal(self.opener("localtags").read())
296 296 # localtags are stored in the local character set
297 297 # while the internal tag table is stored in UTF-8
298 298 readtags(data.splitlines(), "localtags", "local")
299 299 except IOError:
300 300 pass
301 301
302 302 self.tagscache = {}
303 303 self._tagstypecache = {}
304 304 for k,nh in globaltags.items():
305 305 n = nh[0]
306 306 if n != nullid:
307 307 self.tagscache[k] = n
308 308 self._tagstypecache[k] = tagtypes[k]
309 309 self.tagscache['tip'] = self.changelog.tip()
310 310 return self.tagscache
311 311
312 312 def tagtype(self, tagname):
313 313 '''
314 314 return the type of the given tag. result can be:
315 315
316 316 'local' : a local tag
317 317 'global' : a global tag
318 318 None : tag does not exist
319 319 '''
320 320
321 321 self.tags()
322 322
323 323 return self._tagstypecache.get(tagname)
324 324
325 325 def _hgtagsnodes(self):
326 326 heads = self.heads()
327 327 heads.reverse()
328 328 last = {}
329 329 ret = []
330 330 for node in heads:
331 331 c = self[node]
332 332 rev = c.rev()
333 333 try:
334 334 fnode = c.filenode('.hgtags')
335 335 except revlog.LookupError:
336 336 continue
337 337 ret.append((rev, node, fnode))
338 338 if fnode in last:
339 339 ret[last[fnode]] = None
340 340 last[fnode] = len(ret) - 1
341 341 return [item for item in ret if item]
342 342
343 343 def tagslist(self):
344 344 '''return a list of tags ordered by revision'''
345 345 l = []
346 346 for t, n in self.tags().items():
347 347 try:
348 348 r = self.changelog.rev(n)
349 349 except:
350 350 r = -2 # sort to the beginning of the list if unknown
351 351 l.append((r, t, n))
352 352 return [(t, n) for r, t, n in util.sort(l)]
353 353
354 354 def nodetags(self, node):
355 355 '''return the tags associated with a node'''
356 356 if not self.nodetagscache:
357 357 self.nodetagscache = {}
358 358 for t, n in self.tags().items():
359 359 self.nodetagscache.setdefault(n, []).append(t)
360 360 return self.nodetagscache.get(node, [])
361 361
362 362 def _branchtags(self, partial, lrev):
363 363 tiprev = len(self) - 1
364 364 if lrev != tiprev:
365 365 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367 367
368 368 return partial
369 369
370 370 def branchtags(self):
371 371 tip = self.changelog.tip()
372 372 if self.branchcache is not None and self._branchcachetip == tip:
373 373 return self.branchcache
374 374
375 375 oldtip = self._branchcachetip
376 376 self._branchcachetip = tip
377 377 if self.branchcache is None:
378 378 self.branchcache = {} # avoid recursion in changectx
379 379 else:
380 380 self.branchcache.clear() # keep using the same dict
381 381 if oldtip is None or oldtip not in self.changelog.nodemap:
382 382 partial, last, lrev = self._readbranchcache()
383 383 else:
384 384 lrev = self.changelog.rev(oldtip)
385 385 partial = self._ubranchcache
386 386
387 387 self._branchtags(partial, lrev)
388 388
389 389 # the branch cache is stored on disk as UTF-8, but in the local
390 390 # charset internally
391 391 for k, v in partial.items():
392 392 self.branchcache[util.tolocal(k)] = v
393 393 self._ubranchcache = partial
394 394 return self.branchcache
395 395
396 396 def _readbranchcache(self):
397 397 partial = {}
398 398 try:
399 399 f = self.opener("branch.cache")
400 400 lines = f.read().split('\n')
401 401 f.close()
402 402 except (IOError, OSError):
403 403 return {}, nullid, nullrev
404 404
405 405 try:
406 406 last, lrev = lines.pop(0).split(" ", 1)
407 407 last, lrev = bin(last), int(lrev)
408 408 if lrev >= len(self) or self[lrev].node() != last:
409 409 # invalidate the cache
410 410 raise ValueError('invalidating branch cache (tip differs)')
411 411 for l in lines:
412 412 if not l: continue
413 413 node, label = l.split(" ", 1)
414 414 partial[label.strip()] = bin(node)
415 415 except (KeyboardInterrupt, util.SignalInterrupt):
416 416 raise
417 417 except Exception, inst:
418 418 if self.ui.debugflag:
419 419 self.ui.warn(str(inst), '\n')
420 420 partial, last, lrev = {}, nullid, nullrev
421 421 return partial, last, lrev
422 422
423 423 def _writebranchcache(self, branches, tip, tiprev):
424 424 try:
425 425 f = self.opener("branch.cache", "w", atomictemp=True)
426 426 f.write("%s %s\n" % (hex(tip), tiprev))
427 427 for label, node in branches.iteritems():
428 428 f.write("%s %s\n" % (hex(node), label))
429 429 f.rename()
430 430 except (IOError, OSError):
431 431 pass
432 432
433 433 def _updatebranchcache(self, partial, start, end):
434 434 for r in xrange(start, end):
435 435 c = self[r]
436 436 b = c.branch()
437 437 partial[b] = c.node()
438 438
439 439 def lookup(self, key):
440 440 if key == '.':
441 441 return self.dirstate.parents()[0]
442 442 elif key == 'null':
443 443 return nullid
444 444 n = self.changelog._match(key)
445 445 if n:
446 446 return n
447 447 if key in self.tags():
448 448 return self.tags()[key]
449 449 if key in self.branchtags():
450 450 return self.branchtags()[key]
451 451 n = self.changelog._partialmatch(key)
452 452 if n:
453 453 return n
454 454 try:
455 455 if len(key) == 20:
456 456 key = hex(key)
457 457 except:
458 458 pass
459 459 raise repo.RepoError(_("unknown revision '%s'") % key)
460 460
461 461 def local(self):
462 462 return True
463 463
464 464 def join(self, f):
465 465 return os.path.join(self.path, f)
466 466
467 467 def wjoin(self, f):
468 468 return os.path.join(self.root, f)
469 469
470 470 def rjoin(self, f):
471 471 return os.path.join(self.root, util.pconvert(f))
472 472
473 473 def file(self, f):
474 474 if f[0] == '/':
475 475 f = f[1:]
476 476 return filelog.filelog(self.sopener, f)
477 477
478 478 def changectx(self, changeid):
479 479 return self[changeid]
480 480
481 481 def parents(self, changeid=None):
482 482 '''get list of changectxs for parents of changeid'''
483 483 return self[changeid].parents()
484 484
485 485 def filectx(self, path, changeid=None, fileid=None):
486 486 """changeid can be a changeset revision, node, or tag.
487 487 fileid can be a file revision or node."""
488 488 return context.filectx(self, path, changeid, fileid)
489 489
490 490 def getcwd(self):
491 491 return self.dirstate.getcwd()
492 492
493 493 def pathto(self, f, cwd=None):
494 494 return self.dirstate.pathto(f, cwd)
495 495
496 496 def wfile(self, f, mode='r'):
497 497 return self.wopener(f, mode)
498 498
499 499 def _link(self, f):
500 500 return os.path.islink(self.wjoin(f))
501 501
502 502 def _filter(self, filter, filename, data):
503 503 if filter not in self.filterpats:
504 504 l = []
505 505 for pat, cmd in self.ui.configitems(filter):
506 506 if cmd == '!':
507 507 continue
508 508 mf = util.matcher(self.root, "", [pat], [], [])[1]
509 509 fn = None
510 510 params = cmd
511 511 for name, filterfn in self._datafilters.iteritems():
512 512 if cmd.startswith(name):
513 513 fn = filterfn
514 514 params = cmd[len(name):].lstrip()
515 515 break
516 516 if not fn:
517 517 fn = lambda s, c, **kwargs: util.filter(s, c)
518 518 # Wrap old filters not supporting keyword arguments
519 519 if not inspect.getargspec(fn)[2]:
520 520 oldfn = fn
521 521 fn = lambda s, c, **kwargs: oldfn(s, c)
522 522 l.append((mf, fn, params))
523 523 self.filterpats[filter] = l
524 524
525 525 for mf, fn, cmd in self.filterpats[filter]:
526 526 if mf(filename):
527 527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 528 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
529 529 break
530 530
531 531 return data
532 532
533 533 def adddatafilter(self, name, filter):
534 534 self._datafilters[name] = filter
535 535
536 536 def wread(self, filename):
537 537 if self._link(filename):
538 538 data = os.readlink(self.wjoin(filename))
539 539 else:
540 540 data = self.wopener(filename, 'r').read()
541 541 return self._filter("encode", filename, data)
542 542
543 543 def wwrite(self, filename, data, flags):
544 544 data = self._filter("decode", filename, data)
545 545 try:
546 546 os.unlink(self.wjoin(filename))
547 547 except OSError:
548 548 pass
549 549 if 'l' in flags:
550 550 self.wopener.symlink(data, filename)
551 551 else:
552 552 self.wopener(filename, 'w').write(data)
553 553 if 'x' in flags:
554 554 util.set_flags(self.wjoin(filename), False, True)
555 555
556 556 def wwritedata(self, filename, data):
557 557 return self._filter("decode", filename, data)
558 558
559 559 def transaction(self):
560 560 if self._transref and self._transref():
561 561 return self._transref().nest()
562 562
563 563 # abort here if the journal already exists
564 564 if os.path.exists(self.sjoin("journal")):
565 565 raise repo.RepoError(_("journal already exists - run hg recover"))
566 566
567 567 # save dirstate for rollback
568 568 try:
569 569 ds = self.opener("dirstate").read()
570 570 except IOError:
571 571 ds = ""
572 572 self.opener("journal.dirstate", "w").write(ds)
573 573 self.opener("journal.branch", "w").write(self.dirstate.branch())
574 574
575 575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
577 577 (self.join("journal.branch"), self.join("undo.branch"))]
578 578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 579 self.sjoin("journal"),
580 580 aftertrans(renames),
581 581 self.store.createmode)
582 582 self._transref = weakref.ref(tr)
583 583 return tr
584 584
585 585 def recover(self):
586 586 l = self.lock()
587 587 try:
588 588 if os.path.exists(self.sjoin("journal")):
589 589 self.ui.status(_("rolling back interrupted transaction\n"))
590 590 transaction.rollback(self.sopener, self.sjoin("journal"))
591 591 self.invalidate()
592 592 return True
593 593 else:
594 594 self.ui.warn(_("no interrupted transaction available\n"))
595 595 return False
596 596 finally:
597 597 del l
598 598
599 599 def rollback(self):
600 600 wlock = lock = None
601 601 try:
602 602 wlock = self.wlock()
603 603 lock = self.lock()
604 604 if os.path.exists(self.sjoin("undo")):
605 605 self.ui.status(_("rolling back last transaction\n"))
606 606 transaction.rollback(self.sopener, self.sjoin("undo"))
607 607 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
608 608 try:
609 609 branch = self.opener("undo.branch").read()
610 610 self.dirstate.setbranch(branch)
611 611 except IOError:
612 612 self.ui.warn(_("Named branch could not be reset, "
613 613 "current branch still is: %s\n")
614 614 % util.tolocal(self.dirstate.branch()))
615 615 self.invalidate()
616 616 self.dirstate.invalidate()
617 617 else:
618 618 self.ui.warn(_("no rollback information available\n"))
619 619 finally:
620 620 del lock, wlock
621 621
622 622 def invalidate(self):
623 623 for a in "changelog manifest".split():
624 624 if a in self.__dict__:
625 625 delattr(self, a)
626 626 self.tagscache = None
627 627 self._tagstypecache = None
628 628 self.nodetagscache = None
629 629 self.branchcache = None
630 630 self._ubranchcache = None
631 631 self._branchcachetip = None
632 632
633 633 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
634 634 try:
635 635 l = lock.lock(lockname, 0, releasefn, desc=desc)
636 636 except lock.LockHeld, inst:
637 637 if not wait:
638 638 raise
639 639 self.ui.warn(_("waiting for lock on %s held by %r\n") %
640 640 (desc, inst.locker))
641 641 # default to 600 seconds timeout
642 642 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
643 643 releasefn, desc=desc)
644 644 if acquirefn:
645 645 acquirefn()
646 646 return l
647 647
648 648 def lock(self, wait=True):
649 649 if self._lockref and self._lockref():
650 650 return self._lockref()
651 651
652 652 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
653 653 _('repository %s') % self.origroot)
654 654 self._lockref = weakref.ref(l)
655 655 return l
656 656
657 657 def wlock(self, wait=True):
658 658 if self._wlockref and self._wlockref():
659 659 return self._wlockref()
660 660
661 661 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
662 662 self.dirstate.invalidate, _('working directory of %s') %
663 663 self.origroot)
664 664 self._wlockref = weakref.ref(l)
665 665 return l
666 666
667 667 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
668 668 """
669 669 commit an individual file as part of a larger transaction
670 670 """
671 671
672 672 fn = fctx.path()
673 673 t = fctx.data()
674 674 fl = self.file(fn)
675 675 fp1 = manifest1.get(fn, nullid)
676 676 fp2 = manifest2.get(fn, nullid)
677 677
678 678 meta = {}
679 679 cp = fctx.renamed()
680 680 if cp and cp[0] != fn:
681 681 # Mark the new revision of this file as a copy of another
682 682 # file. This copy data will effectively act as a parent
683 683 # of this new revision. If this is a merge, the first
684 684 # parent will be the nullid (meaning "look up the copy data")
685 685 # and the second one will be the other parent. For example:
686 686 #
687 687 # 0 --- 1 --- 3 rev1 changes file foo
688 688 # \ / rev2 renames foo to bar and changes it
689 689 # \- 2 -/ rev3 should have bar with all changes and
690 690 # should record that bar descends from
691 691 # bar in rev2 and foo in rev1
692 692 #
693 693 # this allows this merge to succeed:
694 694 #
695 695 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
696 696 # \ / merging rev3 and rev4 should use bar@rev2
697 697 # \- 2 --- 4 as the merge base
698 698 #
699 699
700 700 cf = cp[0]
701 701 cr = manifest1.get(cf)
702 702 nfp = fp2
703 703
704 704 if manifest2: # branch merge
705 705 if fp2 == nullid: # copied on remote side
706 706 if fp1 != nullid or cf in manifest2:
707 707 cr = manifest2[cf]
708 708 nfp = fp1
709 709
710 710 # find source in nearest ancestor if we've lost track
711 711 if not cr:
712 712 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
713 713 (fn, cf))
714 714 for a in self['.'].ancestors():
715 715 if cf in a:
716 716 cr = a[cf].filenode()
717 717 break
718 718
719 719 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
720 720 meta["copy"] = cf
721 721 meta["copyrev"] = hex(cr)
722 722 fp1, fp2 = nullid, nfp
723 723 elif fp2 != nullid:
724 724 # is one parent an ancestor of the other?
725 725 fpa = fl.ancestor(fp1, fp2)
726 726 if fpa == fp1:
727 727 fp1, fp2 = fp2, nullid
728 728 elif fpa == fp2:
729 729 fp2 = nullid
730 730
731 731 # is the file unmodified from the parent? report existing entry
732 732 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
733 733 return fp1
734 734
735 735 changelist.append(fn)
736 736 return fl.add(t, meta, tr, linkrev, fp1, fp2)
737 737
738 738 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
739 739 if p1 is None:
740 740 p1, p2 = self.dirstate.parents()
741 741 return self.commit(files=files, text=text, user=user, date=date,
742 742 p1=p1, p2=p2, extra=extra, empty_ok=True)
743 743
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit changes to the repository.

        If files is given, only those files are considered; otherwise
        the dirstate status (filtered by match) decides what goes in.
        Passing p1 switches to raw mode: the dirstate is not consulted
        for status.  Returns whatever _commitctx returns (the new node,
        or None when nothing changed).
        """
        wlock = lock = None
        if files:
            # drop duplicate entries before classifying them
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                # a partial commit of a merge would lose the other
                # parent's changes for the unselected files
                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # classify the explicit file list via the dirstate
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                # raw mode: callers supply parents and file list directly
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            # refuse to commit while merge conflicts are unresolved
            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock
793 793
794 794 def commitctx(self, ctx):
795 795 """Add a new revision to current repository.
796 796
797 797 Revision information is passed in the context.memctx argument.
798 798 commitctx() does not touch the working directory.
799 799 """
800 800 wlock = lock = None
801 801 try:
802 802 wlock = self.wlock()
803 803 lock = self.lock()
804 804 return self._commitctx(ctx, force=True, force_editor=False,
805 805 empty_ok=True, use_dirstate=False,
806 806 update_dirstate=False)
807 807 finally:
808 808 del lock, wlock
809 809
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Write the changeset described by wctx inside a transaction.

        Checks in the changed files, builds the new manifest, runs the
        editor when a message is required, adds the changelog entry and
        fires the precommit/pretxncommit/commit hooks.  Returns the new
        changeset node, or None when there is nothing to commit.
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # bail out early when there is literally nothing to do
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # a weak proxy so filelogs can't keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # raw commit: treat an unreadable file as removed
                        remove.append(f)

            # split the changed files into updates vs. new additions
            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # normalize the message: strip trailing whitespace and
            # leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                # refresh the cached branch heads
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
948 948
949 949 def walk(self, match, node=None):
950 950 '''
951 951 walk recursively through the directory tree or a given
952 952 changeset, finding all files matched by the match
953 953 function
954 954 '''
955 955 return self[node].walk(match)
956 956
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The unknown/ignored/clean lists are populated only when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only complain about files that exist in neither side
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            # NOTE: cmp is the list of files the dirstate could not
            # decide about without comparing contents (shadows builtin)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: skip the fixup if we can't
                            # take the wlock without waiting
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # whatever is left in mf1 exists only on the first side
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1062 1062
1063 1063 def add(self, list):
1064 1064 wlock = self.wlock()
1065 1065 try:
1066 1066 rejected = []
1067 1067 for f in list:
1068 1068 p = self.wjoin(f)
1069 1069 try:
1070 1070 st = os.lstat(p)
1071 1071 except:
1072 1072 self.ui.warn(_("%s does not exist!\n") % f)
1073 1073 rejected.append(f)
1074 1074 continue
1075 1075 if st.st_size > 10000000:
1076 1076 self.ui.warn(_("%s: files over 10MB may cause memory and"
1077 1077 " performance problems\n"
1078 1078 "(use 'hg revert %s' to unadd the file)\n")
1079 1079 % (f, f))
1080 1080 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1081 1081 self.ui.warn(_("%s not added: only files and symlinks "
1082 1082 "supported currently\n") % f)
1083 1083 rejected.append(p)
1084 1084 elif self.dirstate[f] in 'amn':
1085 1085 self.ui.warn(_("%s already tracked!\n") % f)
1086 1086 elif self.dirstate[f] == 'r':
1087 1087 self.dirstate.normallookup(f)
1088 1088 else:
1089 1089 self.dirstate.add(f)
1090 1090 return rejected
1091 1091 finally:
1092 1092 del wlock
1093 1093
1094 1094 def forget(self, list):
1095 1095 wlock = self.wlock()
1096 1096 try:
1097 1097 for f in list:
1098 1098 if self.dirstate[f] != 'a':
1099 1099 self.ui.warn(_("%s not added!\n") % f)
1100 1100 else:
1101 1101 self.dirstate.forget(f)
1102 1102 finally:
1103 1103 del wlock
1104 1104
    def remove(self, list, unlink=False):
        """Mark the given paths as removed in the dirstate.

        If unlink is true, the working copies are deleted first; a path
        that still exists afterwards is reported and left untouched.
        """
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # already missing is fine; any other error is fatal
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the unlink above did not take effect
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: dropping the pending add suffices
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1127 1127
    def undelete(self, list):
        """Restore files marked removed ('r') from a dirstate parent.

        Contents and flags are taken from whichever parent manifest
        contains the file; the dirstate entry is reset to normal.
        """
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    # prefer the first parent's copy when both have it
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock
1144 1144
1145 1145 def copy(self, source, dest):
1146 1146 wlock = None
1147 1147 try:
1148 1148 p = self.wjoin(dest)
1149 1149 if not (os.path.exists(p) or os.path.islink(p)):
1150 1150 self.ui.warn(_("%s does not exist!\n") % dest)
1151 1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1152 1152 self.ui.warn(_("copy failed: %s is not a file or a "
1153 1153 "symbolic link\n") % dest)
1154 1154 else:
1155 1155 wlock = self.wlock()
1156 1156 if self.dirstate[dest] in '?r':
1157 1157 self.dirstate.add(dest)
1158 1158 self.dirstate.copy(source, dest)
1159 1159 finally:
1160 1160 del wlock
1161 1161
1162 1162 def heads(self, start=None):
1163 1163 heads = self.changelog.heads(start)
1164 1164 # sort the output in rev descending order
1165 1165 heads = [(-self.changelog.rev(h), h) for h in heads]
1166 1166 return [n for (r, n) in util.sort(heads)]
1167 1167
    def branchheads(self, branch=None, start=None):
        """Return the head nodes of the named branch (default: the
        working directory's branch), optionally restricted to heads
        reachable from start.  Returns [] for an unknown branch.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the brach tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1215 1215
1216 1216 def branches(self, nodes):
1217 1217 if not nodes:
1218 1218 nodes = [self.changelog.tip()]
1219 1219 b = []
1220 1220 for n in nodes:
1221 1221 t = n
1222 1222 while 1:
1223 1223 p = self.changelog.parents(n)
1224 1224 if p[1] != nullid or p[0] == nullid:
1225 1225 b.append((t, n, p[0], p[1]))
1226 1226 break
1227 1227 n = p[0]
1228 1228 return b
1229 1229
1230 1230 def between(self, pairs):
1231 1231 r = []
1232 1232
1233 1233 for top, bottom in pairs:
1234 1234 n, l, i = top, [], 0
1235 1235 f = 1
1236 1236
1237 1237 while n != bottom:
1238 1238 p = self.changelog.parents(n)[0]
1239 1239 if i == f:
1240 1240 l.append(n)
1241 1241 f = f * 2
1242 1242 n = p
1243 1243 i += 1
1244 1244
1245 1245 r.append(l)
1246 1246
1247 1247 return r
1248 1248
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # we are empty: everything the remote has is incoming
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next request round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch the branch requests ten at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1390 1390
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: discover it ourselves
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1438 1438
    def pull(self, remote, heads=None, force=False):
        """Pull changesets from remote, optionally limited to heads.

        Returns 0 when no changes are found, otherwise the value
        returned by addchangegroup.
        """
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                # our repository is empty: everything is incoming
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # a partial pull needs server-side subset support
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock
1459 1459
1460 1460 def push(self, remote, force=False, revs=None):
1461 1461 # there are two ways to push to remote repo:
1462 1462 #
1463 1463 # addchangegroup assumes local user can lock remote
1464 1464 # repo (local filesystem, old ssh servers).
1465 1465 #
1466 1466 # unbundle assumes local user cannot lock remote repo (new ssh
1467 1467 # servers, http servers).
1468 1468
1469 1469 if remote.capable('unbundle'):
1470 1470 return self.push_unbundle(remote, force, revs)
1471 1471 return self.push_addchangegroup(remote, force, revs)
1472 1472
    def prepush(self, remote, force, revs):
        """Work out what a push to remote would transfer.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, status) otherwise, where status is 1 for "no
        changes found" and 0 for a refused push (it would create new
        remote heads without force).
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote: nothing can become an extra head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head is known locally: it stays a head
                        # only if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1528 1528
1529 1529 def push_addchangegroup(self, remote, force, revs):
1530 1530 lock = remote.lock()
1531 1531 try:
1532 1532 ret = self.prepush(remote, force, revs)
1533 1533 if ret[0] is not None:
1534 1534 cg, remote_heads = ret
1535 1535 return remote.addchangegroup(cg, 'push', self.url())
1536 1536 return ret[1]
1537 1537 finally:
1538 1538 del lock
1539 1539
1540 1540 def push_unbundle(self, remote, force, revs):
1541 1541 # local repo finds heads on server, finds out what revs it
1542 1542 # must push. once revs transferred, if server finds it has
1543 1543 # different heads (someone else won commit/push race), server
1544 1544 # aborts.
1545 1545
1546 1546 ret = self.prepush(remote, force, revs)
1547 1547 if ret[0] is not None:
1548 1548 cg, remote_heads = ret
1549 1549 if force: remote_heads = ['force']
1550 1550 return remote.unbundle(cg, remote_heads, 'push')
1551 1551 return ret[1]
1552 1552
1553 1553 def changegroupinfo(self, nodes, source):
1554 1554 if self.ui.verbose or source == 'bundle':
1555 1555 self.ui.status(_("%d changesets found\n") % len(nodes))
1556 1556 if self.ui.debugflag:
1557 1557 self.ui.debug(_("List of changesets:\n"))
1558 1558 for node in nodes:
1559 1559 self.ui.debug("%s\n" % hex(node))
1560 1560
1561 1561 def changegroupsubset(self, bases, heads, source, extranodes=None):
1562 1562 """This function generates a changegroup consisting of all the nodes
1563 1563 that are descendents of any of the bases, and ancestors of any of
1564 1564 the heads.
1565 1565
1566 1566 It is fairly complex as determining which filenodes and which
1567 1567 manifest nodes need to be included for the changeset to be complete
1568 1568 is non-trivial.
1569 1569
1570 1570 Another wrinkle is doing the reverse, figuring out which changeset in
1571 1571 the changegroup a particular filenode or manifestnode belongs to.
1572 1572
1573 1573 The caller can specify some nodes that must be included in the
1574 1574 changegroup using the extranodes argument. It should be a dict
1575 1575 where the keys are the filenames (or 1 for the manifest), and the
1576 1576 values are lists of (node, linknode) tuples, where node is a wanted
1577 1577 node and linknode is the changelog node that should be transmitted as
1578 1578 the linkrev.
1579 1579 """
1580 1580
1581 1581 if extranodes is None:
1582 1582 # can we go through the fast path ?
1583 1583 heads.sort()
1584 1584 allheads = self.heads()
1585 1585 allheads.sort()
1586 1586 if heads == allheads:
1587 1587 common = []
1588 1588 # parents of bases are known from both sides
1589 1589 for n in bases:
1590 1590 for p in self.changelog.parents(n):
1591 1591 if p != nullid:
1592 1592 common.append(p)
1593 1593 return self._changegroup(common, source)
1594 1594
1595 1595 self.hook('preoutgoing', throw=True, source=source)
1596 1596
1597 1597 # Set up some initial variables
1598 1598 # Make it easy to refer to self.changelog
1599 1599 cl = self.changelog
1600 1600 # msng is short for missing - compute the list of changesets in this
1601 1601 # changegroup.
1602 1602 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1603 1603 self.changegroupinfo(msng_cl_lst, source)
1604 1604 # Some bases may turn out to be superfluous, and some heads may be
1605 1605 # too. nodesbetween will return the minimal set of bases and heads
1606 1606 # necessary to re-create the changegroup.
1607 1607
1608 1608 # Known heads are the list of heads that it is assumed the recipient
1609 1609 # of this changegroup will know about.
1610 1610 knownheads = {}
1611 1611 # We assume that all parents of bases are known heads.
1612 1612 for n in bases:
1613 1613 for p in cl.parents(n):
1614 1614 if p != nullid:
1615 1615 knownheads[p] = 1
1616 1616 knownheads = knownheads.keys()
1617 1617 if knownheads:
1618 1618 # Now that we know what heads are known, we can compute which
1619 1619 # changesets are known. The recipient must know about all
1620 1620 # changesets required to reach the known heads from the null
1621 1621 # changeset.
1622 1622 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1623 1623 junk = None
1624 1624 # Transform the list into an ersatz set.
1625 1625 has_cl_set = dict.fromkeys(has_cl_set)
1626 1626 else:
1627 1627 # If there were no known heads, the recipient cannot be assumed to
1628 1628 # know about any changesets.
1629 1629 has_cl_set = {}
1630 1630
1631 1631 # Make it easy to refer to self.manifest
1632 1632 mnfst = self.manifest
1633 1633 # We don't know which manifests are missing yet
1634 1634 msng_mnfst_set = {}
1635 1635 # Nor do we know which filenodes are missing.
1636 1636 msng_filenode_set = {}
1637 1637
1638 1638 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1639 1639 junk = None
1640 1640
1641 1641 # A changeset always belongs to itself, so the changenode lookup
1642 1642 # function for a changenode is identity.
1643 1643 def identity(x):
1644 1644 return x
1645 1645
1646 1646 # A function generating function. Sets up an environment for the
1647 1647 # inner function.
1648 1648 def cmp_by_rev_func(revlog):
1649 1649 # Compare two nodes by their revision number in the environment's
1650 1650 # revision history. Since the revision number both represents the
1651 1651 # most efficient order to read the nodes in, and represents a
1652 1652 # topological sorting of the nodes, this function is often useful.
1653 1653 def cmp_by_rev(a, b):
1654 1654 return cmp(revlog.rev(a), revlog.rev(b))
1655 1655 return cmp_by_rev
1656 1656
1657 1657 # If we determine that a particular file or manifest node must be a
1658 1658 # node that the recipient of the changegroup will already have, we can
1659 1659 # also assume the recipient will have all the parents. This function
1660 1660 # prunes them from the set of missing nodes.
1661 1661 def prune_parents(revlog, hasset, msngset):
1662 1662 haslst = hasset.keys()
1663 1663 haslst.sort(cmp_by_rev_func(revlog))
1664 1664 for node in haslst:
1665 1665 parentlst = [p for p in revlog.parents(node) if p != nullid]
1666 1666 while parentlst:
1667 1667 n = parentlst.pop()
1668 1668 if n not in hasset:
1669 1669 hasset[n] = 1
1670 1670 p = [p for p in revlog.parents(n) if p != nullid]
1671 1671 parentlst.extend(p)
1672 1672 for n in hasset:
1673 1673 msngset.pop(n, None)
1674 1674
1675 1675 # This is a function generating function used to set up an environment
1676 1676 # for the inner function to execute in.
1677 1677 def manifest_and_file_collector(changedfileset):
1678 1678 # This is an information gathering function that gathers
1679 1679 # information from each changeset node that goes out as part of
1680 1680 # the changegroup. The information gathered is a list of which
1681 1681 # manifest nodes are potentially required (the recipient may
1682 1682 # already have them) and total list of all files which were
1683 1683 # changed in any changeset in the changegroup.
1684 1684 #
1685 1685 # We also remember the first changenode we saw any manifest
1686 1686 # referenced by so we can later determine which changenode 'owns'
1687 1687 # the manifest.
1688 1688 def collect_manifests_and_files(clnode):
1689 1689 c = cl.read(clnode)
1690 1690 for f in c[3]:
1691 1691 # This is to make sure we only have one instance of each
1692 1692 # filename string for each filename.
1693 1693 changedfileset.setdefault(f, f)
1694 1694 msng_mnfst_set.setdefault(c[0], clnode)
1695 1695 return collect_manifests_and_files
1696 1696
1697 1697 # Figure out which manifest nodes (of the ones we think might be part
1698 1698 # of the changegroup) the recipient must know about and remove them
1699 1699 # from the changegroup.
1700 1700 def prune_manifests():
1701 1701 has_mnfst_set = {}
1702 1702 for n in msng_mnfst_set:
1703 1703 # If a 'missing' manifest thinks it belongs to a changenode
1704 1704 # the recipient is assumed to have, obviously the recipient
1705 1705 # must have that manifest.
1706 linknode = cl.node(mnfst.linkrev(n))
1706 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1707 1707 if linknode in has_cl_set:
1708 1708 has_mnfst_set[n] = 1
1709 1709 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1710 1710
1711 1711 # Use the information collected in collect_manifests_and_files to say
1712 1712 # which changenode any manifestnode belongs to.
1713 1713 def lookup_manifest_link(mnfstnode):
1714 1714 return msng_mnfst_set[mnfstnode]
1715 1715
1716 1716 # A function generating function that sets up the initial environment
1717 1717 # the inner function.
1718 1718 def filenode_collector(changedfiles):
1719 1719 next_rev = [0]
1720 1720 # This gathers information from each manifestnode included in the
1721 1721 # changegroup about which filenodes the manifest node references
1722 1722 # so we can include those in the changegroup too.
1723 1723 #
1724 1724 # It also remembers which changenode each filenode belongs to. It
1725 1725 # does this by assuming the a filenode belongs to the changenode
1726 1726 # the first manifest that references it belongs to.
1727 1727 def collect_msng_filenodes(mnfstnode):
1728 1728 r = mnfst.rev(mnfstnode)
1729 1729 if r == next_rev[0]:
1730 1730 # If the last rev we looked at was the one just previous,
1731 1731 # we only need to see a diff.
1732 1732 deltamf = mnfst.readdelta(mnfstnode)
1733 1733 # For each line in the delta
1734 1734 for f, fnode in deltamf.items():
1735 1735 f = changedfiles.get(f, None)
1736 1736 # And if the file is in the list of files we care
1737 1737 # about.
1738 1738 if f is not None:
1739 1739 # Get the changenode this manifest belongs to
1740 1740 clnode = msng_mnfst_set[mnfstnode]
1741 1741 # Create the set of filenodes for the file if
1742 1742 # there isn't one already.
1743 1743 ndset = msng_filenode_set.setdefault(f, {})
1744 1744 # And set the filenode's changelog node to the
1745 1745 # manifest's if it hasn't been set already.
1746 1746 ndset.setdefault(fnode, clnode)
1747 1747 else:
1748 1748 # Otherwise we need a full manifest.
1749 1749 m = mnfst.read(mnfstnode)
1750 1750 # For every file in we care about.
1751 1751 for f in changedfiles:
1752 1752 fnode = m.get(f, None)
1753 1753 # If it's in the manifest
1754 1754 if fnode is not None:
1755 1755 # See comments above.
1756 1756 clnode = msng_mnfst_set[mnfstnode]
1757 1757 ndset = msng_filenode_set.setdefault(f, {})
1758 1758 ndset.setdefault(fnode, clnode)
1759 1759 # Remember the revision we hope to see next.
1760 1760 next_rev[0] = r + 1
1761 1761 return collect_msng_filenodes
1762 1762
1763 1763 # We have a list of filenodes we think we need for a file, lets remove
1764 1764 # all those we now the recipient must have.
1765 1765 def prune_filenodes(f, filerevlog):
1766 1766 msngset = msng_filenode_set[f]
1767 1767 hasset = {}
1768 1768 # If a 'missing' filenode thinks it belongs to a changenode we
1769 1769 # assume the recipient must have, then the recipient must have
1770 1770 # that filenode.
1771 1771 for n in msngset:
1772 clnode = cl.node(filerevlog.linkrev(n))
1772 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1773 1773 if clnode in has_cl_set:
1774 1774 hasset[n] = 1
1775 1775 prune_parents(filerevlog, hasset, msngset)
1776 1776
1777 1777 # A function generator function that sets up the a context for the
1778 1778 # inner function.
1779 1779 def lookup_filenode_link_func(fname):
1780 1780 msngset = msng_filenode_set[fname]
1781 1781 # Lookup the changenode the filenode belongs to.
1782 1782 def lookup_filenode_link(fnode):
1783 1783 return msngset[fnode]
1784 1784 return lookup_filenode_link
1785 1785
1786 1786 # Add the nodes that were explicitly requested.
1787 1787 def add_extra_nodes(name, nodes):
1788 1788 if not extranodes or name not in extranodes:
1789 1789 return
1790 1790
1791 1791 for node, linknode in extranodes[name]:
1792 1792 if node not in nodes:
1793 1793 nodes[node] = linknode
1794 1794
1795 1795 # Now that we have all theses utility functions to help out and
1796 1796 # logically divide up the task, generate the group.
1797 1797 def gengroup():
1798 1798 # The set of changed files starts empty.
1799 1799 changedfiles = {}
1800 1800 # Create a changenode group generator that will call our functions
1801 1801 # back to lookup the owning changenode and collect information.
1802 1802 group = cl.group(msng_cl_lst, identity,
1803 1803 manifest_and_file_collector(changedfiles))
1804 1804 for chnk in group:
1805 1805 yield chnk
1806 1806
1807 1807 # The list of manifests has been collected by the generator
1808 1808 # calling our functions back.
1809 1809 prune_manifests()
1810 1810 add_extra_nodes(1, msng_mnfst_set)
1811 1811 msng_mnfst_lst = msng_mnfst_set.keys()
1812 1812 # Sort the manifestnodes by revision number.
1813 1813 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1814 1814 # Create a generator for the manifestnodes that calls our lookup
1815 1815 # and data collection functions back.
1816 1816 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1817 1817 filenode_collector(changedfiles))
1818 1818 for chnk in group:
1819 1819 yield chnk
1820 1820
1821 1821 # These are no longer needed, dereference and toss the memory for
1822 1822 # them.
1823 1823 msng_mnfst_lst = None
1824 1824 msng_mnfst_set.clear()
1825 1825
1826 1826 if extranodes:
1827 1827 for fname in extranodes:
1828 1828 if isinstance(fname, int):
1829 1829 continue
1830 1830 msng_filenode_set.setdefault(fname, {})
1831 1831 changedfiles[fname] = 1
1832 1832 # Go through all our files in order sorted by name.
1833 1833 for fname in util.sort(changedfiles):
1834 1834 filerevlog = self.file(fname)
1835 1835 if not len(filerevlog):
1836 1836 raise util.Abort(_("empty or missing revlog for %s") % fname)
1837 1837 # Toss out the filenodes that the recipient isn't really
1838 1838 # missing.
1839 1839 if fname in msng_filenode_set:
1840 1840 prune_filenodes(fname, filerevlog)
1841 1841 add_extra_nodes(fname, msng_filenode_set[fname])
1842 1842 msng_filenode_lst = msng_filenode_set[fname].keys()
1843 1843 else:
1844 1844 msng_filenode_lst = []
1845 1845 # If any filenodes are left, generate the group for them,
1846 1846 # otherwise don't bother.
1847 1847 if len(msng_filenode_lst) > 0:
1848 1848 yield changegroup.chunkheader(len(fname))
1849 1849 yield fname
1850 1850 # Sort the filenodes by their revision #
1851 1851 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1852 1852 # Create a group generator and only pass in a changenode
1853 1853 # lookup function as we need to collect no information
1854 1854 # from filenodes.
1855 1855 group = filerevlog.group(msng_filenode_lst,
1856 1856 lookup_filenode_link_func(fname))
1857 1857 for chnk in group:
1858 1858 yield chnk
1859 1859 if fname in msng_filenode_set:
1860 1860 # Don't need this anymore, toss it to free memory.
1861 1861 del msng_filenode_set[fname]
1862 1862 # Signal that no more groups are left.
1863 1863 yield changegroup.closechunk()
1864 1864
1865 1865 if msng_cl_lst:
1866 1866 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1867 1867
1868 1868 return util.chunkbuffer(gengroup())
1869 1869
1870 1870 def changegroup(self, basenodes, source):
1871 1871 # to avoid a race we use changegroupsubset() (issue1320)
1872 1872 return self.changegroupsubset(basenodes, self.heads(), source)
1873 1873
1874 1874 def _changegroup(self, common, source):
1875 1875 """Generate a changegroup of all nodes that we have that a recipient
1876 1876 doesn't.
1877 1877
1878 1878 This is much easier than the previous function as we can assume that
1879 1879 the recipient has any changenode we aren't sending them.
1880 1880
1881 1881 common is the set of common nodes between remote and self"""
1882 1882
1883 1883 self.hook('preoutgoing', throw=True, source=source)
1884 1884
1885 1885 cl = self.changelog
1886 1886 nodes = cl.findmissing(common)
1887 1887 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1888 1888 self.changegroupinfo(nodes, source)
1889 1889
1890 1890 def identity(x):
1891 1891 return x
1892 1892
1893 1893 def gennodelst(log):
1894 1894 for r in log:
1895 n = log.node(r)
1896 if log.linkrev(n) in revset:
1897 yield n
1895 if log.linkrev(r) in revset:
1896 yield log.node(r)
1898 1897
1899 1898 def changed_file_collector(changedfileset):
1900 1899 def collect_changed_files(clnode):
1901 1900 c = cl.read(clnode)
1902 1901 for fname in c[3]:
1903 1902 changedfileset[fname] = 1
1904 1903 return collect_changed_files
1905 1904
1906 1905 def lookuprevlink_func(revlog):
1907 1906 def lookuprevlink(n):
1908 return cl.node(revlog.linkrev(n))
1907 return cl.node(revlog.linkrev(revlog.rev(n)))
1909 1908 return lookuprevlink
1910 1909
1911 1910 def gengroup():
1912 1911 # construct a list of all changed files
1913 1912 changedfiles = {}
1914 1913
1915 1914 for chnk in cl.group(nodes, identity,
1916 1915 changed_file_collector(changedfiles)):
1917 1916 yield chnk
1918 1917
1919 1918 mnfst = self.manifest
1920 1919 nodeiter = gennodelst(mnfst)
1921 1920 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1922 1921 yield chnk
1923 1922
1924 1923 for fname in util.sort(changedfiles):
1925 1924 filerevlog = self.file(fname)
1926 1925 if not len(filerevlog):
1927 1926 raise util.Abort(_("empty or missing revlog for %s") % fname)
1928 1927 nodeiter = gennodelst(filerevlog)
1929 1928 nodeiter = list(nodeiter)
1930 1929 if nodeiter:
1931 1930 yield changegroup.chunkheader(len(fname))
1932 1931 yield fname
1933 1932 lookup = lookuprevlink_func(filerevlog)
1934 1933 for chnk in filerevlog.group(nodeiter, lookup):
1935 1934 yield chnk
1936 1935
1937 1936 yield changegroup.closechunk()
1938 1937
1939 1938 if nodes:
1940 1939 self.hook('outgoing', node=hex(nodes[0]), source=source)
1941 1940
1942 1941 return util.chunkbuffer(gengroup())
1943 1942
1944 1943 def addchangegroup(self, source, srctype, url, emptyok=False):
1945 1944 """add changegroup to repo.
1946 1945
1947 1946 return values:
1948 1947 - nothing changed or no source: 0
1949 1948 - more heads than before: 1+added heads (2..n)
1950 1949 - less heads than before: -1-removed heads (-2..-n)
1951 1950 - number of heads stays the same: 1
1952 1951 """
1953 1952 def csmap(x):
1954 1953 self.ui.debug(_("add changeset %s\n") % short(x))
1955 1954 return len(cl)
1956 1955
1957 1956 def revmap(x):
1958 1957 return cl.rev(x)
1959 1958
1960 1959 if not source:
1961 1960 return 0
1962 1961
1963 1962 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1964 1963
1965 1964 changesets = files = revisions = 0
1966 1965
1967 1966 # write changelog data to temp files so concurrent readers will not see
1968 1967 # inconsistent view
1969 1968 cl = self.changelog
1970 1969 cl.delayupdate()
1971 1970 oldheads = len(cl.heads())
1972 1971
1973 1972 tr = self.transaction()
1974 1973 try:
1975 1974 trp = weakref.proxy(tr)
1976 1975 # pull off the changeset group
1977 1976 self.ui.status(_("adding changesets\n"))
1978 1977 cor = len(cl) - 1
1979 1978 chunkiter = changegroup.chunkiter(source)
1980 1979 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1981 1980 raise util.Abort(_("received changelog group is empty"))
1982 1981 cnr = len(cl) - 1
1983 1982 changesets = cnr - cor
1984 1983
1985 1984 # pull off the manifest group
1986 1985 self.ui.status(_("adding manifests\n"))
1987 1986 chunkiter = changegroup.chunkiter(source)
1988 1987 # no need to check for empty manifest group here:
1989 1988 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1990 1989 # no new manifest will be created and the manifest group will
1991 1990 # be empty during the pull
1992 1991 self.manifest.addgroup(chunkiter, revmap, trp)
1993 1992
1994 1993 # process the files
1995 1994 self.ui.status(_("adding file changes\n"))
1996 1995 while 1:
1997 1996 f = changegroup.getchunk(source)
1998 1997 if not f:
1999 1998 break
2000 1999 self.ui.debug(_("adding %s revisions\n") % f)
2001 2000 fl = self.file(f)
2002 2001 o = len(fl)
2003 2002 chunkiter = changegroup.chunkiter(source)
2004 2003 if fl.addgroup(chunkiter, revmap, trp) is None:
2005 2004 raise util.Abort(_("received file revlog group is empty"))
2006 2005 revisions += len(fl) - o
2007 2006 files += 1
2008 2007
2009 2008 # make changelog see real files again
2010 2009 cl.finalize(trp)
2011 2010
2012 2011 newheads = len(self.changelog.heads())
2013 2012 heads = ""
2014 2013 if oldheads and newheads != oldheads:
2015 2014 heads = _(" (%+d heads)") % (newheads - oldheads)
2016 2015
2017 2016 self.ui.status(_("added %d changesets"
2018 2017 " with %d changes to %d files%s\n")
2019 2018 % (changesets, revisions, files, heads))
2020 2019
2021 2020 if changesets > 0:
2022 2021 self.hook('pretxnchangegroup', throw=True,
2023 2022 node=hex(self.changelog.node(cor+1)), source=srctype,
2024 2023 url=url)
2025 2024
2026 2025 tr.close()
2027 2026 finally:
2028 2027 del tr
2029 2028
2030 2029 if changesets > 0:
2031 2030 # forcefully update the on-disk branch cache
2032 2031 self.ui.debug(_("updating the branch cache\n"))
2033 2032 self.branchtags()
2034 2033 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2035 2034 source=srctype, url=url)
2036 2035
2037 2036 for i in xrange(cor + 1, cnr + 1):
2038 2037 self.hook("incoming", node=hex(self.changelog.node(i)),
2039 2038 source=srctype, url=url)
2040 2039
2041 2040 # never return 0 here:
2042 2041 if newheads < oldheads:
2043 2042 return newheads - oldheads - 1
2044 2043 else:
2045 2044 return newheads - oldheads + 1
2046 2045
2047 2046
2048 2047 def stream_in(self, remote):
2049 2048 fp = remote.stream_out()
2050 2049 l = fp.readline()
2051 2050 try:
2052 2051 resp = int(l)
2053 2052 except ValueError:
2054 2053 raise util.UnexpectedOutput(
2055 2054 _('Unexpected response from remote server:'), l)
2056 2055 if resp == 1:
2057 2056 raise util.Abort(_('operation forbidden by server'))
2058 2057 elif resp == 2:
2059 2058 raise util.Abort(_('locking the remote repository failed'))
2060 2059 elif resp != 0:
2061 2060 raise util.Abort(_('the server sent an unknown error code'))
2062 2061 self.ui.status(_('streaming all changes\n'))
2063 2062 l = fp.readline()
2064 2063 try:
2065 2064 total_files, total_bytes = map(int, l.split(' ', 1))
2066 2065 except (ValueError, TypeError):
2067 2066 raise util.UnexpectedOutput(
2068 2067 _('Unexpected response from remote server:'), l)
2069 2068 self.ui.status(_('%d files to transfer, %s of data\n') %
2070 2069 (total_files, util.bytecount(total_bytes)))
2071 2070 start = time.time()
2072 2071 for i in xrange(total_files):
2073 2072 # XXX doesn't support '\n' or '\r' in filenames
2074 2073 l = fp.readline()
2075 2074 try:
2076 2075 name, size = l.split('\0', 1)
2077 2076 size = int(size)
2078 2077 except (ValueError, TypeError):
2079 2078 raise util.UnexpectedOutput(
2080 2079 _('Unexpected response from remote server:'), l)
2081 2080 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2082 2081 ofp = self.sopener(name, 'w')
2083 2082 for chunk in util.filechunkiter(fp, limit=size):
2084 2083 ofp.write(chunk)
2085 2084 ofp.close()
2086 2085 elapsed = time.time() - start
2087 2086 if elapsed <= 0:
2088 2087 elapsed = 0.001
2089 2088 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2090 2089 (util.bytecount(total_bytes), elapsed,
2091 2090 util.bytecount(total_bytes / elapsed)))
2092 2091 self.invalidate()
2093 2092 return len(self.heads()) + 1
2094 2093
2095 2094 def clone(self, remote, heads=[], stream=False):
2096 2095 '''clone remote repository.
2097 2096
2098 2097 keyword arguments:
2099 2098 heads: list of revs to clone (forces use of pull)
2100 2099 stream: use streaming clone if possible'''
2101 2100
2102 2101 # now, all clients that can request uncompressed clones can
2103 2102 # read repo formats supported by all servers that can serve
2104 2103 # them.
2105 2104
2106 2105 # if revlog format changes, client will have to check version
2107 2106 # and format flags on "stream" capability, and use
2108 2107 # uncompressed only if compatible.
2109 2108
2110 2109 if stream and not heads and remote.capable('stream'):
2111 2110 return self.stream_in(remote)
2112 2111 return self.pull(remote, heads)
2113 2112
2114 2113 # used to avoid circular references so destructors work
2115 2114 def aftertrans(files):
2116 2115 renamefiles = [tuple(t) for t in files]
2117 2116 def a():
2118 2117 for src, dest in renamefiles:
2119 2118 util.rename(src, dest)
2120 2119 return a
2121 2120
2122 2121 def instance(ui, path, create):
2123 2122 return localrepository(ui, util.drop_scheme('file', path), create)
2124 2123
2125 2124 def islocal(path):
2126 2125 return True
@@ -1,136 +1,135 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import changegroup, os
10 10 from node import nullrev, short
11 11 from i18n import _
12 12
13 13 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
14 14 """create a bundle with the specified revisions as a backup"""
15 15 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
16 16 backupdir = repo.join("strip-backup")
17 17 if not os.path.isdir(backupdir):
18 18 os.mkdir(backupdir)
19 19 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
20 20 repo.ui.warn(_("saving bundle to %s\n") % name)
21 21 return changegroup.writebundle(cg, name, "HG10BZ")
22 22
23 23 def _collectfiles(repo, striprev):
24 24 """find out the filelogs affected by the strip"""
25 25 files = {}
26 26
27 27 for x in xrange(striprev, len(repo)):
28 28 for name in repo[x].files():
29 29 if name in files:
30 30 continue
31 31 files[name] = 1
32 32
33 33 files = files.keys()
34 34 files.sort()
35 35 return files
36 36
37 37 def _collectextranodes(repo, files, link):
38 38 """return the nodes that have to be saved before the strip"""
39 39 def collectone(revlog):
40 40 extra = []
41 41 startrev = count = len(revlog)
42 42 # find the truncation point of the revlog
43 43 for i in xrange(0, count):
44 node = revlog.node(i)
45 lrev = revlog.linkrev(node)
44 lrev = revlog.linkrev(i)
46 45 if lrev >= link:
47 46 startrev = i + 1
48 47 break
49 48
50 49 # see if any revision after that point has a linkrev less than link
51 50 # (we have to manually save these guys)
52 51 for i in xrange(startrev, count):
53 52 node = revlog.node(i)
54 lrev = revlog.linkrev(node)
53 lrev = revlog.linkrev(i)
55 54 if lrev < link:
56 55 extra.append((node, cl.node(lrev)))
57 56
58 57 return extra
59 58
60 59 extranodes = {}
61 60 cl = repo.changelog
62 61 extra = collectone(repo.manifest)
63 62 if extra:
64 63 extranodes[1] = extra
65 64 for fname in files:
66 65 f = repo.file(fname)
67 66 extra = collectone(f)
68 67 if extra:
69 68 extranodes[fname] = extra
70 69
71 70 return extranodes
72 71
73 72 def strip(ui, repo, node, backup="all"):
74 73 cl = repo.changelog
75 74 # TODO delete the undo files, and handle undo of merge sets
76 75 striprev = cl.rev(node)
77 76
78 77 # Some revisions with rev > striprev may not be descendants of striprev.
79 78 # We have to find these revisions and put them in a bundle, so that
80 79 # we can restore them after the truncations.
81 80 # To create the bundle we use repo.changegroupsubset which requires
82 81 # the list of heads and bases of the set of interesting revisions.
83 82 # (head = revision in the set that has no descendant in the set;
84 83 # base = revision in the set that has no ancestor in the set)
85 84 tostrip = {striprev: 1}
86 85 saveheads = {}
87 86 savebases = []
88 87 for r in xrange(striprev + 1, len(cl)):
89 88 parents = cl.parentrevs(r)
90 89 if parents[0] in tostrip or parents[1] in tostrip:
91 90 # r is a descendant of striprev
92 91 tostrip[r] = 1
93 92 # if this is a merge and one of the parents does not descend
94 93 # from striprev, mark that parent as a savehead.
95 94 if parents[1] != nullrev:
96 95 for p in parents:
97 96 if p not in tostrip and p > striprev:
98 97 saveheads[p] = 1
99 98 else:
100 99 # if no parents of this revision will be stripped, mark it as
101 100 # a savebase
102 101 if parents[0] < striprev and parents[1] < striprev:
103 102 savebases.append(cl.node(r))
104 103
105 104 for p in parents:
106 105 if p in saveheads:
107 106 del saveheads[p]
108 107 saveheads[r] = 1
109 108
110 109 saveheads = [cl.node(r) for r in saveheads]
111 110 files = _collectfiles(repo, striprev)
112 111
113 112 extranodes = _collectextranodes(repo, files, striprev)
114 113
115 114 # create a changegroup for all the branches we need to keep
116 115 if backup == "all":
117 116 _bundle(repo, [node], cl.heads(), node, 'backup')
118 117 if saveheads or extranodes:
119 118 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
120 119 extranodes)
121 120
122 121 cl.strip(striprev)
123 122 repo.manifest.strip(striprev)
124 123 for name in files:
125 124 f = repo.file(name)
126 125 f.strip(striprev)
127 126
128 127 if saveheads or extranodes:
129 128 ui.status(_("adding branch\n"))
130 129 f = open(chgrpfile, "rb")
131 130 gen = changegroup.readbundle(f, chgrpfile)
132 131 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
133 132 f.close()
134 133 if backup != "strip":
135 134 os.unlink(chgrpfile)
136 135
@@ -1,1374 +1,1374 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import bin, hex, nullid, nullrev, short
14 14 from i18n import _
15 15 import changegroup, errno, ancestor, mdiff, parsers
16 16 import struct, util, zlib
17 17
18 18 _pack = struct.pack
19 19 _unpack = struct.unpack
20 20 _compress = zlib.compress
21 21 _decompress = zlib.decompress
22 22 _sha = util.sha1
23 23
24 24 # revlog flags
25 25 REVLOGV0 = 0
26 26 REVLOGNG = 1
27 27 REVLOGNGINLINEDATA = (1 << 16)
28 28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31 31
32 32 class RevlogError(Exception):
33 33 pass
34 34
35 35 class LookupError(RevlogError, KeyError):
36 36 def __init__(self, name, index, message):
37 37 self.name = name
38 38 if isinstance(name, str) and len(name) == 20:
39 39 name = short(name)
40 40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41 41
42 42 def __str__(self):
43 43 return RevlogError.__str__(self)
44 44
45 45 def getoffset(q):
46 46 return int(q >> 16)
47 47
48 48 def gettype(q):
49 49 return int(q & 0xFFFF)
50 50
51 51 def offset_type(offset, type):
52 52 return long(long(offset) << 16 | type)
53 53
54 54 def hash(text, p1, p2):
55 55 """generate a hash from the given text and its parent hashes
56 56
57 57 This hash combines both the current file contents and its history
58 58 in a manner that makes it easy to distinguish nodes with the same
59 59 content in the revision graph.
60 60 """
61 61 l = [p1, p2]
62 62 l.sort()
63 63 s = _sha(l[0])
64 64 s.update(l[1])
65 65 s.update(text)
66 66 return s.digest()
67 67
68 68 def compress(text):
69 69 """ generate a possibly-compressed representation of text """
70 70 if not text:
71 71 return ("", text)
72 72 l = len(text)
73 73 bin = None
74 74 if l < 44:
75 75 pass
76 76 elif l > 1000000:
77 77 # zlib makes an internal copy, thus doubling memory usage for
78 78 # large files, so lets do this in pieces
79 79 z = zlib.compressobj()
80 80 p = []
81 81 pos = 0
82 82 while pos < l:
83 83 pos2 = pos + 2**20
84 84 p.append(z.compress(text[pos:pos2]))
85 85 pos = pos2
86 86 p.append(z.flush())
87 87 if sum(map(len, p)) < l:
88 88 bin = "".join(p)
89 89 else:
90 90 bin = _compress(text)
91 91 if bin is None or len(bin) > l:
92 92 if text[0] == '\0':
93 93 return ("", text)
94 94 return ('u', text)
95 95 return ("", bin)
96 96
97 97 def decompress(bin):
98 98 """ decompress the given input """
99 99 if not bin:
100 100 return bin
101 101 t = bin[0]
102 102 if t == '\0':
103 103 return bin
104 104 if t == 'x':
105 105 return _decompress(bin)
106 106 if t == 'u':
107 107 return bin[1:]
108 108 raise RevlogError(_("unknown compression type %r") % t)
109 109
110 110 class lazyparser(object):
111 111 """
112 112 this class avoids the need to parse the entirety of large indices
113 113 """
114 114
115 115 # lazyparser is not safe to use on windows if win32 extensions not
116 116 # available. it keeps file handle open, which make it not possible
117 117 # to break hardlinks on local cloned repos.
118 118
119 119 def __init__(self, dataf, size):
120 120 self.dataf = dataf
121 121 self.s = struct.calcsize(indexformatng)
122 122 self.datasize = size
123 123 self.l = size/self.s
124 124 self.index = [None] * self.l
125 125 self.map = {nullid: nullrev}
126 126 self.allmap = 0
127 127 self.all = 0
128 128 self.mapfind_count = 0
129 129
130 130 def loadmap(self):
131 131 """
132 132 during a commit, we need to make sure the rev being added is
133 133 not a duplicate. This requires loading the entire index,
134 134 which is fairly slow. loadmap can load up just the node map,
135 135 which takes much less time.
136 136 """
137 137 if self.allmap:
138 138 return
139 139 end = self.datasize
140 140 self.allmap = 1
141 141 cur = 0
142 142 count = 0
143 143 blocksize = self.s * 256
144 144 self.dataf.seek(0)
145 145 while cur < end:
146 146 data = self.dataf.read(blocksize)
147 147 off = 0
148 148 for x in xrange(256):
149 149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
150 150 self.map[n] = count
151 151 count += 1
152 152 if count >= self.l:
153 153 break
154 154 off += self.s
155 155 cur += blocksize
156 156
157 157 def loadblock(self, blockstart, blocksize, data=None):
158 158 if self.all:
159 159 return
160 160 if data is None:
161 161 self.dataf.seek(blockstart)
162 162 if blockstart + blocksize > self.datasize:
163 163 # the revlog may have grown since we've started running,
164 164 # but we don't have space in self.index for more entries.
165 165 # limit blocksize so that we don't get too much data.
166 166 blocksize = max(self.datasize - blockstart, 0)
167 167 data = self.dataf.read(blocksize)
168 168 lend = len(data) / self.s
169 169 i = blockstart / self.s
170 170 off = 0
171 171 # lazyindex supports __delitem__
172 172 if lend > len(self.index) - i:
173 173 lend = len(self.index) - i
174 174 for x in xrange(lend):
175 175 if self.index[i + x] == None:
176 176 b = data[off : off + self.s]
177 177 self.index[i + x] = b
178 178 n = b[ngshaoffset:ngshaoffset + 20]
179 179 self.map[n] = i + x
180 180 off += self.s
181 181
182 182 def findnode(self, node):
183 183 """search backwards through the index file for a specific node"""
184 184 if self.allmap:
185 185 return None
186 186
187 187 # hg log will cause many many searches for the manifest
188 188 # nodes. After we get called a few times, just load the whole
189 189 # thing.
190 190 if self.mapfind_count > 8:
191 191 self.loadmap()
192 192 if node in self.map:
193 193 return node
194 194 return None
195 195 self.mapfind_count += 1
196 196 last = self.l - 1
197 197 while self.index[last] != None:
198 198 if last == 0:
199 199 self.all = 1
200 200 self.allmap = 1
201 201 return None
202 202 last -= 1
203 203 end = (last + 1) * self.s
204 204 blocksize = self.s * 256
205 205 while end >= 0:
206 206 start = max(end - blocksize, 0)
207 207 self.dataf.seek(start)
208 208 data = self.dataf.read(end - start)
209 209 findend = end - start
210 210 while True:
211 211 # we're searching backwards, so we have to make sure
212 212 # we don't find a changeset where this node is a parent
213 213 off = data.find(node, 0, findend)
214 214 findend = off
215 215 if off >= 0:
216 216 i = off / self.s
217 217 off = i * self.s
218 218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
219 219 if n == node:
220 220 self.map[n] = i + start / self.s
221 221 return node
222 222 else:
223 223 break
224 224 end -= blocksize
225 225 return None
226 226
227 227 def loadindex(self, i=None, end=None):
228 228 if self.all:
229 229 return
230 230 all = False
231 231 if i == None:
232 232 blockstart = 0
233 233 blocksize = (65536 / self.s) * self.s
234 234 end = self.datasize
235 235 all = True
236 236 else:
237 237 if end:
238 238 blockstart = i * self.s
239 239 end = end * self.s
240 240 blocksize = end - blockstart
241 241 else:
242 242 blockstart = (i & ~1023) * self.s
243 243 blocksize = self.s * 1024
244 244 end = blockstart + blocksize
245 245 while blockstart < end:
246 246 self.loadblock(blockstart, blocksize)
247 247 blockstart += blocksize
248 248 if all:
249 249 self.all = True
250 250
251 251 class lazyindex(object):
252 252 """a lazy version of the index array"""
253 253 def __init__(self, parser):
254 254 self.p = parser
255 255 def __len__(self):
256 256 return len(self.p.index)
257 257 def load(self, pos):
258 258 if pos < 0:
259 259 pos += len(self.p.index)
260 260 self.p.loadindex(pos)
261 261 return self.p.index[pos]
262 262 def __getitem__(self, pos):
263 263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
264 264 def __setitem__(self, pos, item):
265 265 self.p.index[pos] = _pack(indexformatng, *item)
266 266 def __delitem__(self, pos):
267 267 del self.p.index[pos]
268 268 def insert(self, pos, e):
269 269 self.p.index.insert(pos, _pack(indexformatng, *e))
270 270 def append(self, e):
271 271 self.p.index.append(_pack(indexformatng, *e))
272 272
class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # search the on-disk index for the node; findnode caches hits
        # in p.map, so a successful search makes p.map[key] available
        n = self.p.findnode(key)
        # identity test: findnode returns a node string or None
        if n is None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        # not seen yet: force the whole map in and check again
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                # entry not parsed yet; load it from disk
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = _unpack(indexformatng, ret)
            yield ret[7]  # field 7 of an index entry is the nodeid
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
309 309
# v0 index entry: 4 longs (offset, compressed length, base rev, link rev)
# followed by the two parent nodes and the nodeid, each 20 bytes
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56  # byte offset of the nodeid within a packed v0 entry
312 312
class revlogoldio(object):
    """I/O for the original (v0) revlog index format."""
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, inline):
        entsize = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = 0
        data = fp.read()
        datalen = len(data)
        off = 0
        while off + entsize <= datalen:
            e = _unpack(indexformatv0, data[off:off + entsize])
            off += entsize
            # rewrite the v0 tuple into revlogv1 layout, mapping the
            # parent nodes to the revision numbers seen so far
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        # drop the v1-only fields and turn parent revs back into nodes
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
341 341
# index ng (revlogv1) entry layout:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid (20-byte sha1 padded with 12 null bytes)
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32  # byte offset of the nodeid within a packed ng entry
versionformat = ">I"
355 355
class revlogio(object):
    # I/O for the current (revlogng) index format.
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, inline):
        """parse an index file; return (index, nodemap, chunkcache)"""
        try:
            size = util.fstat(fp).st_size
        except AttributeError:
            # fp may be a file-like object with no usable fstat
            size = 0

        if util.openhardlinks() and not inline and size > 1000000:
            # big index, let's parse it on demand
            parser = lazyparser(fp, size)
            index = lazyindex(parser)
            nodemap = lazymap(parser)
            # fix up the first entry: zero its offset but keep the type bits
            e = list(index[0])
            type = gettype(e[0])
            e[0] = offset_type(0, type)
            index[0] = e
            return index, nodemap, None

        data = fp.read()
        # call the C implementation to parse the index data
        index, nodemap, cache = parsers.parse_index(data, inline)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        """pack an index entry; entry 0 also carries the version header"""
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p
387 387
388 388 class revlog(object):
389 389 """
390 390 the underlying revision storage object
391 391
392 392 A revlog consists of two parts, an index and the revision data.
393 393
394 394 The index is a file with a fixed record size containing
395 395 information on each revision, including its nodeid (hash), the
396 396 nodeids of its parents, the position and offset of its data within
397 397 the data file, and the revision it's based on. Finally, each entry
398 398 contains a linkrev entry that can serve as a pointer to external
399 399 data.
400 400
401 401 The revision data itself is a linear collection of data chunks.
402 402 Each chunk represents a revision and is usually represented as a
403 403 delta against the previous chunk. To bound lookup time, runs of
404 404 deltas are limited to about 2 times the length of the original
405 405 version data. This makes retrieval of a version proportional to
406 406 its size, or O(1) relative to the number of revisions.
407 407
408 408 Both pieces of the revlog are written to in an append-only
409 409 fashion, which means we never need to rewrite a file to insert or
410 410 remove data, and can use some simple techniques to avoid the need
411 411 for locking while reading.
412 412 """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        indexfile is the path of the ".i" index file; the data file
        name is derived from it by replacing the suffix with ".d".
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None        # (node, rev, text) of last revision read
        self._chunkcache = None   # (offset, raw data) cache of file chunks
        self.nodemap = {nullid: nullrev}
        self.index = []

        # pick a default version; the opener may override it
        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            v = opener.defversion
        if v & REVLOGNG:
            v |= REVLOGNGINLINEDATA

        # read the version header from the index file, if present
        i = ""
        try:
            f = self.opener(self.indexfile)
            i = f.read(4)
            f.seek(0)
            if len(i) > 0:
                v = struct.unpack(versionformat, i)[0]
        except IOError, inst:
            # a missing index just means an empty revlog
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            # reuse the already-open file handle to parse the index
            d = self._io.parseindex(f, self._inline)
            self.index, self.nodemap, self._chunkcache = d

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid) :
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
470 470
471 471 def _loadindex(self, start, end):
472 472 """load a block of indexes all at once from the lazy parser"""
473 473 if isinstance(self.index, lazyindex):
474 474 self.index.p.loadindex(start, end)
475 475
476 476 def _loadindexmap(self):
477 477 """loads both the map and the index from the lazy parser"""
478 478 if isinstance(self.index, lazyindex):
479 479 p = self.index.p
480 480 p.loadindex()
481 481 self.nodemap = p.map
482 482
483 483 def _loadmap(self):
484 484 """loads the map from the lazy parser"""
485 485 if isinstance(self.nodemap, lazymap):
486 486 self.nodemap.p.loadmap()
487 487 self.nodemap = self.nodemap.p.map
488 488
489 489 def tip(self):
490 490 return self.node(len(self.index) - 2)
491 491 def __len__(self):
492 492 return len(self.index) - 1
493 493 def __iter__(self):
494 494 for i in xrange(len(self)):
495 495 yield i
    def rev(self, node):
        """map a nodeid to its revision number; raise LookupError if
        the node is not in this revlog"""
        try:
            return self.nodemap[node]
        except KeyError:
            raise LookupError(node, self.indexfile, _('no node'))
501 501 def node(self, rev):
502 502 return self.index[rev][7]
503 def linkrev(self, node):
504 return self.index[self.rev(node)][4]
503 def linkrev(self, rev):
504 return self.index[rev][4]
505 505 def parents(self, node):
506 506 d = self.index[self.rev(node)][5:7]
507 507 return (self.node(d[0]), self.node(d[1]))
508 508 def parentrevs(self, rev):
509 509 return self.index[rev][5:7]
510 510 def start(self, rev):
511 511 return int(self.index[rev][0] >> 16)
512 512 def end(self, rev):
513 513 return self.start(rev) + self.length(rev)
514 514 def length(self, rev):
515 515 return self.index[rev][1]
516 516 def base(self, rev):
517 517 return self.index[rev][3]
518 518
    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        # the index stores -1 when the uncompressed length is unknown;
        # fall back to reconstructing the full text and measuring it
        t = self.revision(self.node(rev))
        return len(t)

        # alternate implementation, The advantage to this code is it
        # will be faster for a single revision. But, the results are not
        # cached, so finding the size of every revision will be slower.
        """
        if self.cache and self.cache[1] == rev:
            return len(self.cache[2])

        base = self.base(rev)
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.revision(self.node(base))

        l = len(text)
        for x in xrange(base + 1, rev + 1):
            l = mdiff.patchedsize(l, self.chunk(x))
        return l
        """
547 547
548 548 def reachable(self, node, stop=None):
549 549 """return a hash of all nodes ancestral to a given node, including
550 550 the node itself, stopping when stop is matched"""
551 551 reachable = {}
552 552 visit = [node]
553 553 reachable[node] = 1
554 554 if stop:
555 555 stopn = self.rev(stop)
556 556 else:
557 557 stopn = 0
558 558 while visit:
559 559 n = visit.pop(0)
560 560 if n == stop:
561 561 continue
562 562 if n == nullid:
563 563 continue
564 564 for p in self.parents(n):
565 565 if self.rev(p) < stopn:
566 566 continue
567 567 if p not in reachable:
568 568 reachable[p] = 1
569 569 visit.append(p)
570 570 return reachable
571 571
572 572 def ancestors(self, *revs):
573 573 'Generate the ancestors of revs using a breadth-first visit'
574 574 visit = list(revs)
575 575 seen = util.set([nullrev])
576 576 while visit:
577 577 for parent in self.parentrevs(visit.pop(0)):
578 578 if parent not in seen:
579 579 visit.append(parent)
580 580 seen.add(parent)
581 581 yield parent
582 582
583 583 def descendants(self, *revs):
584 584 'Generate the descendants of revs in topological order'
585 585 seen = util.set(revs)
586 586 for i in xrange(min(revs) + 1, len(self)):
587 587 for x in self.parentrevs(i):
588 588 if x != nullrev and x in seen:
589 589 seen.add(i)
590 590 yield i
591 591 break
592 592
    def findmissing(self, common=None, heads=None):
        '''
        returns the topologically sorted list of nodes from the set:
        missing = (ancestors(heads) \ ancestors(common))

        where ancestors() is the set of ancestors from heads, heads included

        if heads is None, the heads of the revlog are used
        if common is None, nullid is assumed to be a common node
        '''
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        # work in revision numbers from here on
        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = dict.fromkeys(self.ancestors(*common))
        has[nullrev] = None
        for r in common:
            has[r] = None

        # take all ancestors from heads that aren't in has
        missing = {}
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing[r] = None
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        # sorting revision numbers yields topological order
        missing = missing.keys()
        missing.sort()
        return [self.node(r) for r in missing]
632 632
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones have
        been pruned. Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads.keys()
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
782 782
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = len(self)
            if not count:
                return [nullid]
            # a rev is a head unless some later rev names it as a parent
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0  # clear both parents
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = dict.fromkeys([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        # sweep forward: a rev reachable from start is a candidate head
        # and unseats any reachable parent that isn't a stop rev
        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable[r] = 1
                    heads[r] = 1
                if p in heads and p not in stoprevs:
                    del heads[p]

        return [self.node(r) for r in heads]
822 822
823 823 def children(self, node):
824 824 """find the children of a given node"""
825 825 c = []
826 826 p = self.rev(node)
827 827 for r in range(p + 1, len(self)):
828 828 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
829 829 if prevs:
830 830 for pr in prevs:
831 831 if pr == p:
832 832 c.append(self.node(r))
833 833 elif p == nullrev:
834 834 c.append(self.node(r))
835 835 return c
836 836
    def _match(self, id):
        """try to match id exactly (revision number, binary node,
        str(rev), or full hex nodeid); return the node or None"""
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                # negative revs count back from the tip
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
870 870
871 871 def _partialmatch(self, id):
872 872 if len(id) < 40:
873 873 try:
874 874 # hex(node)[:...]
875 875 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
876 876 node = None
877 877 for n in self.nodemap:
878 878 if n.startswith(bin_id) and hex(n).startswith(id):
879 879 if node is not None:
880 880 raise LookupError(id, self.indexfile,
881 881 _('ambiguous identifier'))
882 882 node = n
883 883 if node is not None:
884 884 return node
885 885 except TypeError:
886 886 pass
887 887
888 888 def lookup(self, id):
889 889 """locate a node based on:
890 890 - revision number or str(revision number)
891 891 - nodeid or subset of hex nodeid
892 892 """
893 893 n = self._match(id)
894 894 if n is not None:
895 895 return n
896 896 n = self._partialmatch(id)
897 897 if n:
898 898 return n
899 899
900 900 raise LookupError(id, self.indexfile, _('no match found'))
901 901
902 902 def cmp(self, node, text):
903 903 """compare text with a given file revision"""
904 904 p1, p2 = self.parents(node)
905 905 return hash(text, p1, p2) != node
906 906
    def chunk(self, rev, df=None):
        """return the decompressed raw data chunk for rev, reading
        through a 64k-minimum chunk cache

        df - an optional already-open data file to read from
        """
        def loadcache(df):
            if not df:
                # inline revlogs keep data inside the index file
                if self._inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self._chunkcache = (start, df.read(cache_length))

        start, length = self.start(rev), self.length(rev)
        if self._inline:
            # skip over the index entries interleaved with the data
            start += (rev + 1) * self._io.size
        end = start + length

        offset = 0
        if not self._chunkcache:
            cache_length = max(65536, length)
            loadcache(df)
        else:
            cache_start = self._chunkcache[0]
            cache_length = len(self._chunkcache[1])
            cache_end = cache_start + cache_length
            if start >= cache_start and end <= cache_end:
                # it is cached
                offset = start - cache_start
            else:
                cache_length = max(65536, length)
                loadcache(df)

        # avoid copying large chunks
        c = self._chunkcache[1]
        if cache_length != length:
            c = c[offset:offset + length]

        return decompress(c)
943 943
944 944 def delta(self, node):
945 945 """return or calculate a delta between a node and its predecessor"""
946 946 r = self.rev(node)
947 947 return self.revdiff(r - 1, r)
948 948
949 949 def revdiff(self, rev1, rev2):
950 950 """return or calculate a delta between two revisions"""
951 951 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
952 952 return self.chunk(rev2)
953 953
954 954 return mdiff.textdiff(self.revision(self.node(rev1)),
955 955 self.revision(self.node(rev2)))
956 956
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return str(self._cache[2])

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        df = None

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            # start patching from the cached intermediate text instead
            # of rebuilding from the chain base
            base = self._cache[1]
            text = str(self._cache[2])
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base + 1:
                df = self.opener(self.datafile)
        else:
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base:
                df = self.opener(self.datafile)
            text = self.chunk(base, df=df)

        # apply the chain of deltas from base+1 up to rev
        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text
998 998
999 999 def checkinlinesize(self, tr, fp=None):
1000 1000 if not self._inline:
1001 1001 return
1002 1002 if not fp:
1003 1003 fp = self.opener(self.indexfile, 'r')
1004 1004 fp.seek(0, 2)
1005 1005 size = fp.tell()
1006 1006 if size < 131072:
1007 1007 return
1008 1008 trinfo = tr.find(self.indexfile)
1009 1009 if trinfo == None:
1010 1010 raise RevlogError(_("%s not found in the transaction")
1011 1011 % self.indexfile)
1012 1012
1013 1013 trindex = trinfo[2]
1014 1014 dataoff = self.start(trindex)
1015 1015
1016 1016 tr.add(self.datafile, dataoff)
1017 1017 df = self.opener(self.datafile, 'w')
1018 1018 try:
1019 1019 calc = self._io.size
1020 1020 for r in self:
1021 1021 start = self.start(r) + (r + 1) * calc
1022 1022 length = self.length(r)
1023 1023 fp.seek(start)
1024 1024 d = fp.read(length)
1025 1025 df.write(d)
1026 1026 finally:
1027 1027 df.close()
1028 1028
1029 1029 fp.close()
1030 1030 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1031 1031 self.version &= ~(REVLOGNGINLINEDATA)
1032 1032 self._inline = False
1033 1033 for i in self:
1034 1034 e = self._io.packentry(self.index[i], self.node, self.version, i)
1035 1035 fp.write(e)
1036 1036
1037 1037 # if we don't call rename, the temp file will never replace the
1038 1038 # real index
1039 1039 fp.rename()
1040 1040
1041 1041 tr.replace(self.indexfile, trindex * calc)
1042 1042 self._chunkcache = None
1043 1043
    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta

        returns the node of the new revision
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
        finally:
            # close the data file before the index file it refers to
            if dfh:
                dfh.close()
            ifh.close()
1063 1063
    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        """add a revision using already-open index (ifh) and data (dfh)
        file handles; returns the node (existing or new)"""
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                # compute the delta against the previous revision
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        # insert before the trailing sentinel null entry
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            # inline: index entry and data interleaved in the index file
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node
1113 1113
1114 1114 def ancestor(self, a, b):
1115 1115 """calculate the least common ancestor of nodes a and b"""
1116 1116
1117 1117 def parents(rev):
1118 1118 return [p for p in self.parentrevs(rev) if p != nullrev]
1119 1119
1120 1120 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1121 1121 if c is None:
1122 1122 return nullid
1123 1123
1124 1124 return self.node(c)
1125 1125
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                # no delta base: emit the full text with a trivial header
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                # stream very large deltas in 256k pieces
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()
1173 1173
1174 1174 def addgroup(self, revs, linkmapper, transaction):
1175 1175 """
1176 1176 add a delta group
1177 1177
1178 1178 given a set of deltas, add them to the revision log. the
1179 1179 first delta is against its parent, which should be in our
1180 1180 log, the rest are against the previous delta.
1181 1181 """
1182 1182
1183 1183 #track the base of the current delta log
1184 1184 r = len(self)
1185 1185 t = r - 1
1186 1186 node = None
1187 1187
1188 1188 base = prev = nullrev
1189 1189 start = end = textlen = 0
1190 1190 if r:
1191 1191 end = self.end(t)
1192 1192
1193 1193 ifh = self.opener(self.indexfile, "a+")
1194 1194 isize = r * self._io.size
1195 1195 if self._inline:
1196 1196 transaction.add(self.indexfile, end + isize, r)
1197 1197 dfh = None
1198 1198 else:
1199 1199 transaction.add(self.indexfile, isize, r)
1200 1200 transaction.add(self.datafile, end)
1201 1201 dfh = self.opener(self.datafile, "a")
1202 1202
1203 1203 try:
1204 1204 # loop through our set of deltas
1205 1205 chain = None
1206 1206 for chunk in revs:
1207 1207 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1208 1208 link = linkmapper(cs)
1209 1209 if node in self.nodemap:
1210 1210 # this can happen if two branches make the same change
1211 1211 chain = node
1212 1212 continue
1213 1213 delta = buffer(chunk, 80)
1214 1214 del chunk
1215 1215
1216 1216 for p in (p1, p2):
1217 1217 if not p in self.nodemap:
1218 1218 raise LookupError(p, self.indexfile, _('unknown parent'))
1219 1219
1220 1220 if not chain:
1221 1221 # retrieve the parent revision of the delta chain
1222 1222 chain = p1
1223 1223 if not chain in self.nodemap:
1224 1224 raise LookupError(chain, self.indexfile, _('unknown base'))
1225 1225
1226 1226 # full versions are inserted when the needed deltas become
1227 1227 # comparable to the uncompressed text or when the previous
1228 1228 # version is not the one we have a delta against. We use
1229 1229 # the size of the previous full rev as a proxy for the
1230 1230 # current size.
1231 1231
1232 1232 if chain == prev:
1233 1233 cdelta = compress(delta)
1234 1234 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1235 1235 textlen = mdiff.patchedsize(textlen, delta)
1236 1236
1237 1237 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1238 1238 # flush our writes here so we can read it in revision
1239 1239 if dfh:
1240 1240 dfh.flush()
1241 1241 ifh.flush()
1242 1242 text = self.revision(chain)
1243 1243 if len(text) == 0:
1244 1244 # skip over trivial delta header
1245 1245 text = buffer(delta, 12)
1246 1246 else:
1247 1247 text = mdiff.patches(text, [delta])
1248 1248 del delta
1249 1249 chk = self._addrevision(text, transaction, link, p1, p2, None,
1250 1250 ifh, dfh)
1251 1251 if not dfh and not self._inline:
1252 1252 # addrevision switched from inline to conventional
1253 1253 # reopen the index
1254 1254 dfh = self.opener(self.datafile, "a")
1255 1255 ifh = self.opener(self.indexfile, "a")
1256 1256 if chk != node:
1257 1257 raise RevlogError(_("consistency error adding group"))
1258 1258 textlen = len(text)
1259 1259 else:
1260 1260 e = (offset_type(end, 0), cdeltalen, textlen, base,
1261 1261 link, self.rev(p1), self.rev(p2), node)
1262 1262 self.index.insert(-1, e)
1263 1263 self.nodemap[node] = r
1264 1264 entry = self._io.packentry(e, self.node, self.version, r)
1265 1265 if self._inline:
1266 1266 ifh.write(entry)
1267 1267 ifh.write(cdelta[0])
1268 1268 ifh.write(cdelta[1])
1269 1269 self.checkinlinesize(transaction, ifh)
1270 1270 if not self._inline:
1271 1271 dfh = self.opener(self.datafile, "a")
1272 1272 ifh = self.opener(self.indexfile, "a")
1273 1273 else:
1274 1274 dfh.write(cdelta[0])
1275 1275 dfh.write(cdelta[1])
1276 1276 ifh.write(entry)
1277 1277
1278 1278 t, r, chain, prev = r, r + 1, node, node
1279 1279 base = self.base(t)
1280 1280 start = self.start(base)
1281 1281 end = self.end(t)
1282 1282 finally:
1283 1283 if dfh:
1284 1284 dfh.close()
1285 1285 ifh.close()
1286 1286
1287 1287 return node
1288 1288
1289 1289 def strip(self, minlink):
1290 1290 """truncate the revlog on the first revision with a linkrev >= minlink
1291 1291
1292 1292 This function is called when we're stripping revision minlink and
1293 1293 its descendants from the repository.
1294 1294
1295 1295 We have to remove all revisions with linkrev >= minlink, because
1296 1296 the equivalent changelog revisions will be renumbered after the
1297 1297 strip.
1298 1298
1299 1299 So we truncate the revlog on the first of these revisions, and
1300 1300 trust that the caller has saved the revisions that shouldn't be
1301 1301 removed and that it'll readd them after this truncation.
1302 1302 """
1303 1303 if len(self) == 0:
1304 1304 return
1305 1305
1306 1306 if isinstance(self.index, lazyindex):
1307 1307 self._loadindexmap()
1308 1308
1309 1309 for rev in self:
1310 1310 if self.index[rev][4] >= minlink:
1311 1311 break
1312 1312 else:
1313 1313 return
1314 1314
1315 1315 # first truncate the files on disk
1316 1316 end = self.start(rev)
1317 1317 if not self._inline:
1318 1318 df = self.opener(self.datafile, "a")
1319 1319 df.truncate(end)
1320 1320 end = rev * self._io.size
1321 1321 else:
1322 1322 end += rev * self._io.size
1323 1323
1324 1324 indexf = self.opener(self.indexfile, "a")
1325 1325 indexf.truncate(end)
1326 1326
1327 1327 # then reset internal state in memory to forget those revisions
1328 1328 self._cache = None
1329 1329 self._chunkcache = None
1330 1330 for x in xrange(rev, len(self)):
1331 1331 del self.nodemap[self.node(x)]
1332 1332
1333 1333 del self.index[rev:-1]
1334 1334
1335 1335 def checksize(self):
1336 1336 expected = 0
1337 1337 if len(self):
1338 1338 expected = max(0, self.end(len(self) - 1))
1339 1339
1340 1340 try:
1341 1341 f = self.opener(self.datafile)
1342 1342 f.seek(0, 2)
1343 1343 actual = f.tell()
1344 1344 dd = actual - expected
1345 1345 except IOError, inst:
1346 1346 if inst.errno != errno.ENOENT:
1347 1347 raise
1348 1348 dd = 0
1349 1349
1350 1350 try:
1351 1351 f = self.opener(self.indexfile)
1352 1352 f.seek(0, 2)
1353 1353 actual = f.tell()
1354 1354 s = self._io.size
1355 1355 i = max(0, actual / s)
1356 1356 di = actual - (i * s)
1357 1357 if self._inline:
1358 1358 databytes = 0
1359 1359 for r in self:
1360 1360 databytes += max(0, self.length(r))
1361 1361 dd = 0
1362 1362 di = actual - len(self) * s - databytes
1363 1363 except IOError, inst:
1364 1364 if inst.errno != errno.ENOENT:
1365 1365 raise
1366 1366 di = 0
1367 1367
1368 1368 return (dd, di)
1369 1369
1370 1370 def files(self):
1371 1371 res = [ self.indexfile ]
1372 1372 if not self._inline:
1373 1373 res.append(self.datafile)
1374 1374 return res
@@ -1,238 +1,239 b''
1 1 # verify.py - repository integrity checking for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import revlog, util
11 11
12 12 def verify(repo):
13 13 lock = repo.lock()
14 14 try:
15 15 return _verify(repo)
16 16 finally:
17 17 del lock
18 18
19 19 def _verify(repo):
20 20 mflinkrevs = {}
21 21 filelinkrevs = {}
22 22 filenodes = {}
23 23 revisions = 0
24 24 badrevs = {}
25 25 errors = [0]
26 26 warnings = [0]
27 27 ui = repo.ui
28 28 cl = repo.changelog
29 29 mf = repo.manifest
30 30
31 31 if not repo.cancopy():
32 32 raise util.Abort(_("cannot verify bundle or remote repos"))
33 33
34 34 def err(linkrev, msg, filename=None):
35 35 if linkrev != None:
36 36 badrevs[linkrev] = True
37 37 else:
38 38 linkrev = '?'
39 39 msg = "%s: %s" % (linkrev, msg)
40 40 if filename:
41 41 msg = "%s@%s" % (filename, msg)
42 42 ui.warn(" " + msg + "\n")
43 43 errors[0] += 1
44 44
45 45 def exc(linkrev, msg, inst, filename=None):
46 46 if isinstance(inst, KeyboardInterrupt):
47 47 ui.warn(_("interrupted"))
48 48 raise
49 49 err(linkrev, "%s: %s" % (msg, inst), filename)
50 50
51 51 def warn(msg):
52 52 ui.warn(msg + "\n")
53 53 warnings[0] += 1
54 54
55 55 def checklog(obj, name):
56 56 if not len(obj) and (havecl or havemf):
57 57 err(0, _("empty or missing %s") % name)
58 58 return
59 59
60 60 d = obj.checksize()
61 61 if d[0]:
62 62 err(None, _("data length off by %d bytes") % d[0], name)
63 63 if d[1]:
64 64 err(None, _("index contains %d extra bytes") % d[1], name)
65 65
66 66 if obj.version != revlog.REVLOGV0:
67 67 if not revlogv1:
68 68 warn(_("warning: `%s' uses revlog format 1") % name)
69 69 elif revlogv1:
70 70 warn(_("warning: `%s' uses revlog format 0") % name)
71 71
72 72 def checkentry(obj, i, node, seen, linkrevs, f):
73 lr = obj.linkrev(node)
73 lr = obj.linkrev(obj.rev(node))
74 74 if lr < 0 or (havecl and lr not in linkrevs):
75 75 t = "unexpected"
76 76 if lr < 0 or lr >= len(cl):
77 77 t = "nonexistent"
78 78 err(None, _("rev %d point to %s changeset %d") % (i, t, lr), f)
79 79 if linkrevs:
80 80 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
81 81 lr = None # can't be trusted
82 82
83 83 try:
84 84 p1, p2 = obj.parents(node)
85 85 if p1 not in seen and p1 != nullid:
86 86 err(lr, _("unknown parent 1 %s of %s") %
87 87 (short(p1), short(n)), f)
88 88 if p2 not in seen and p2 != nullid:
89 89 err(lr, _("unknown parent 2 %s of %s") %
90 90 (short(p2), short(p1)), f)
91 91 except Exception, inst:
92 92 exc(lr, _("checking parents of %s") % short(node), inst, f)
93 93
94 94 if node in seen:
95 95 err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
96 96 seen[n] = i
97 97 return lr
98 98
99 99 revlogv1 = cl.version != revlog.REVLOGV0
100 100 if ui.verbose or not revlogv1:
101 101 ui.status(_("repository uses revlog format %d\n") %
102 102 (revlogv1 and 1 or 0))
103 103
104 104 havecl = len(cl) > 0
105 105 havemf = len(mf) > 0
106 106
107 107 ui.status(_("checking changesets\n"))
108 108 seen = {}
109 109 checklog(cl, "changelog")
110 110 for i in repo:
111 111 n = cl.node(i)
112 112 checkentry(cl, i, n, seen, [i], "changelog")
113 113
114 114 try:
115 115 changes = cl.read(n)
116 116 mflinkrevs.setdefault(changes[0], []).append(i)
117 117 for f in changes[3]:
118 118 filelinkrevs.setdefault(f, []).append(i)
119 119 except Exception, inst:
120 120 exc(i, _("unpacking changeset %s") % short(n), inst)
121 121
122 122 ui.status(_("checking manifests\n"))
123 123 seen = {}
124 124 checklog(mf, "manifest")
125 125 for i in mf:
126 126 n = mf.node(i)
127 127 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
128 128 if n in mflinkrevs:
129 129 del mflinkrevs[n]
130 130
131 131 try:
132 132 for f, fn in mf.readdelta(n).iteritems():
133 133 if not f:
134 134 err(lr, _("file without name in manifest"))
135 135 elif f != "/dev/null":
136 136 fns = filenodes.setdefault(f, {})
137 137 if fn not in fns:
138 fns[fn] = n
138 fns[fn] = i
139 139 except Exception, inst:
140 140 exc(lr, _("reading manifest delta %s") % short(n), inst)
141 141
142 142 ui.status(_("crosschecking files in changesets and manifests\n"))
143 143
144 144 if havemf:
145 145 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
146 146 err(c, _("changeset refers to unknown manifest %s") % short(m))
147 147 del mflinkrevs
148 148
149 149 for f in util.sort(filelinkrevs):
150 150 if f not in filenodes:
151 151 lr = filelinkrevs[f][0]
152 152 err(lr, _("in changeset but not in manifest"), f)
153 153
154 154 if havecl:
155 155 for f in util.sort(filenodes):
156 156 if f not in filelinkrevs:
157 157 try:
158 lr = min([repo.file(f).linkrev(n) for n in filenodes[f]])
158 fl = repo.file(f)
159 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
159 160 except:
160 161 lr = None
161 162 err(lr, _("in manifest but not in changeset"), f)
162 163
163 164 ui.status(_("checking files\n"))
164 165
165 166 storefiles = {}
166 167 for f, f2, size in repo.store.datafiles():
167 168 if not f:
168 169 err(None, _("cannot decode filename '%s'") % f2)
169 170 elif size > 0:
170 171 storefiles[f] = True
171 172
172 173 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
173 174 for f in files:
174 175 fl = repo.file(f)
175 176
176 177 for ff in fl.files():
177 178 try:
178 179 del storefiles[ff]
179 180 except KeyError:
180 181 err(0, _("missing revlog!"), ff)
181 182
182 183 checklog(fl, f)
183 184 seen = {}
184 185 for i in fl:
185 186 revisions += 1
186 187 n = fl.node(i)
187 188 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
188 189 if f in filenodes:
189 190 if havemf and n not in filenodes[f]:
190 191 err(lr, _("%s not in manifests") % (short(n)), f)
191 192 else:
192 193 del filenodes[f][n]
193 194
194 195 # verify contents
195 196 try:
196 197 t = fl.read(n)
197 198 rp = fl.renamed(n)
198 199 if len(t) != fl.size(i):
199 200 if not fl._readmeta(n): # ancient copy?
200 201 err(lr, _("unpacked size is %s, %s expected") %
201 202 (len(t), fl.size(i)), f)
202 203 except Exception, inst:
203 204 exc(lr, _("unpacking %s") % short(n), inst, f)
204 205
205 206 # check renames
206 207 try:
207 208 if rp:
208 209 fl2 = repo.file(rp[0])
209 210 if not len(fl2):
210 211 err(lr, _("empty or missing copy source revlog %s:%s")
211 212 % (rp[0], short(rp[1])), f)
212 213 elif rp[1] == nullid:
213 214 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
214 215 % (f, lr, rp[0], short(rp[1])))
215 216 else:
216 217 rev = fl2.rev(rp[1])
217 218 except Exception, inst:
218 219 exc(lr, _("checking rename of %s") % short(n), inst, f)
219 220
220 221 # cross-check
221 222 if f in filenodes:
222 223 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].items()]
223 224 for lr, node in util.sort(fns):
224 225 err(lr, _("%s in manifests not found") % short(node), f)
225 226
226 227 for f in storefiles:
227 228 warn(_("warning: orphan revlog '%s'") % f)
228 229
229 230 ui.status(_("%d files, %d changesets, %d total revisions\n") %
230 231 (len(files), len(cl), revisions))
231 232 if warnings[0]:
232 233 ui.warn(_("%d warnings encountered!\n") % warnings[0])
233 234 if errors[0]:
234 235 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
235 236 if badrevs:
236 237 ui.warn(_("(first damaged changeset appears to be %d)\n")
237 238 % min(badrevs))
238 239 return 1
General Comments 0
You need to be logged in to leave comments. Login now