##// END OF EJS Templates
add a -r/--rev option to heads to show only the heads descended from rev
Benoit Boissinot -
r1550:ccb9b62d default
parent child Browse files
Show More
@@ -1,2652 +1,2656 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from node import *
10 10 from i18n import gettext as _
11 11 demandload(globals(), "os re sys signal shutil imp urllib pdb")
12 12 demandload(globals(), "fancyopts ui hg util lock revlog")
13 13 demandload(globals(), "fnmatch hgweb mdiff random signal time traceback")
14 14 demandload(globals(), "errno socket version struct atexit sets bz2")
15 15
class UnknownCommand(Exception):
    """Raised when the requested command name is absent from the command table."""
class AmbiguousCommand(Exception):
    """Raised when an abbreviated command name matches more than one command."""
20 20
def filterfiles(filters, files):
    """Select the members of files matched by filters.

    A filter selects a file either by exact name or, treated as a
    directory prefix, by matching everything beneath it.  Order of the
    result follows the original scan order (exact matches first).
    """
    selected = [name for name in files if name in filters]

    for prefix in filters:
        # treat each filter as a directory unless it already ends in "/"
        if prefix and not prefix.endswith("/"):
            prefix += "/"
        for name in files:
            if name.startswith(prefix):
                selected.append(name)
    return selected
29 29
def relpath(repo, args):
    """Rebase args onto the repo subdirectory we are in, normalized.

    When the caller sits at the repository root (empty cwd), args are
    already repo-relative and are returned untouched.
    """
    cwd = repo.getcwd()
    if not cwd:
        return args
    return [util.normpath(os.path.join(cwd, arg)) for arg in args]
35 35
def matchpats(repo, cwd, pats=None, opts=None, head=''):
    """Build a matcher for the given patterns via util.cmdmatcher.

    pats defaults to everything under '.'; opts may carry 'include' /
    'exclude' pattern lists.  The defaults are None (normalized here)
    rather than []/{} to avoid the shared-mutable-default-argument
    pitfall; behavior is unchanged for all callers.
    """
    if opts is None:
        opts = {}
    return util.cmdmatcher(repo.root, cwd, pats or ['.'], opts.get('include'),
                           opts.get('exclude'), head)
39 39
def makewalk(repo, pats, opts, head=''):
    """Prepare a repository walk for the given patterns.

    Returns (files, matchfn, iterator); the iterator yields
    (src, abspath, relpath, exact) where exact is true for files that
    were named explicitly rather than matched by a pattern.
    """
    cwd = repo.getcwd()
    files, matchfn, anypats = matchpats(repo, cwd, pats, opts, head)
    # membership set of the explicitly-listed files
    exact = dict.fromkeys(files)
    def iterate():
        for src, fn in repo.walk(files=files, match=matchfn):
            yield src, fn, util.pathto(cwd, fn), fn in exact
    return files, matchfn, iterate()
48 48
def walk(repo, pats, opts, head=''):
    """Convenience wrapper: yield makewalk's walk tuples directly."""
    allfiles, matcher, walker = makewalk(repo, pats, opts, head)
    for item in walker:
        yield item
53 53
def walkchangerevs(ui, repo, cwd, pats, opts):
    '''Iterate over files and the revs they changed in.

    Callers most commonly need to iterate backwards over the history
    it is interested in. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an (iterator, getchange) pair. The
    getchange function returns the changelog entry for a numeric
    revision. The iterator yields 3-tuples. They will be of one of
    the following forms:

    "window", incrementing, lastrev: stepping through a window,
    positive if walking forwards through revs, last rev in the
    sequence iterated over - use to reset state for the current window

    "add", rev, fns: out-of-order traversal of the given file names
    fns, which changed during revision rev - use to gather data for
    possible display

    "iter", rev, None: in-order traversal of the revs earlier iterated
    over with "add" - use to display data'''

    if repo.changelog.count() == 0:
        return [], False

    cwd = repo.getcwd()
    # anchor the include/exclude patterns at the current directory when
    # no explicit file patterns were given
    if not pats and cwd:
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
    files, matchfn, anypats = matchpats(repo, (pats and cwd) or '',
                                        pats, opts)
    revs = map(int, revrange(ui, repo, opts['rev'] or ['tip:0']))
    wanted = {}        # set of revisions to display
    slowpath = anypats # patterns force scanning every changeset
    window = 300       # revisions handled per batch
    fncache = {}       # rev -> matching file names changed in that rev

    # memoized changelog reader
    chcache = {}
    def getchange(rev):
        ch = chcache.get(rev)
        if ch is None:
            chcache[rev] = ch = repo.changelog.read(repo.lookup(str(rev)))
        return ch

    if not slowpath and not files:
        # No files, no patterns.  Display all revs.
        wanted = dict(zip(revs, revs))
    if not slowpath:
        # Only files, no patterns.  Check the history of each file.
        def filerevgen(filelog):
            # yield the changelog revs touching this file, newest first,
            # gathered a window at a time
            for i in xrange(filelog.count() - 1, -1, -window):
                revs = []
                for j in xrange(max(0, i - window), i + 1):
                    revs.append(filelog.linkrev(filelog.node(j)))
                revs.reverse()
                for rev in revs:
                    yield rev

        minrev, maxrev = min(revs), max(revs)
        for file in files:
            filelog = repo.file(file)
            # A zero count may be a directory or deleted file, so
            # try to find matching entries on the slow path.
            if filelog.count() == 0:
                slowpath = True
                break
            for rev in filerevgen(filelog):
                if rev <= maxrev:
                    if rev < minrev:
                        break
                    fncache.setdefault(rev, [])
                    fncache[rev].append(file)
                    wanted[rev] = 1
    if slowpath:
        # The slow path checks files modified in every changeset.
        def changerevgen():
            for i in xrange(repo.changelog.count() - 1, -1, -window):
                for j in xrange(max(0, i - window), i + 1):
                    yield j, getchange(j)[3]

        for rev, changefiles in changerevgen():
            matches = filter(matchfn, changefiles)
            if matches:
                fncache[rev] = matches
                wanted[rev] = 1

    def iterate():
        for i in xrange(0, len(revs), window):
            yield 'window', revs[0] < revs[-1], revs[-1]
            nrevs = [rev for rev in revs[i:min(i+window, len(revs))]
                     if rev in wanted]
            # gather data in forward (sorted) order, then emit the revs
            # in the requested order
            srevs = list(nrevs)
            srevs.sort()
            for rev in srevs:
                fns = fncache.get(rev) or filter(matchfn, getchange(rev)[3])
                yield 'add', rev, fns
            for rev in nrevs:
                yield 'iter', rev, None
    return iterate(), getchange
158 158
# separator between the two ends of a revision range spec, e.g. "1:5"
revrangesep = ':'
160 160
def revrange(ui, repo, revs, revlog=None):
    """Yield revision as strings from a list of revision specifications."""
    # default to the changelog when no specific revlog was given
    if revlog is None:
        revlog = repo.changelog
    revcount = revlog.count()
    def fix(val, defval):
        # Resolve one endpoint of a range: empty -> defval; a plain
        # decimal (negative counts back from the end) -> clamped rev
        # number; anything else is looked up as a changelog identifier,
        # then as an identifier in the given revlog.
        if not val:
            return defval
        try:
            num = int(val)
            if str(num) != val:
                raise ValueError
            if num < 0: num += revcount
            if num < 0: num = 0
            elif num >= revcount:
                raise ValueError
        except ValueError:
            try:
                num = repo.changelog.rev(repo.lookup(val))
            except KeyError:
                try:
                    num = revlog.rev(revlog.lookup(val))
                except KeyError:
                    raise util.Abort(_('invalid revision identifier %s'), val)
        return num
    seen = {}  # suppress duplicate revs across overlapping specs
    for spec in revs:
        if spec.find(revrangesep) >= 0:
            # a "start:end" range, walked in the requested direction
            start, end = spec.split(revrangesep, 1)
            start = fix(start, 0)
            end = fix(end, revcount - 1)
            step = start > end and -1 or 1
            for rev in xrange(start, end+step, step):
                if rev in seen: continue
                seen[rev] = 1
                yield str(rev)
        else:
            rev = fix(spec, None)
            if rev in seen: continue
            seen[rev] = 1
            yield str(rev)
202 202
def make_filename(repo, r, pat, node=None,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output file name pattern.

    Escapes are enabled only when their inputs were supplied:
    %H/%h full/short hex of node, %R its rev number, %r zero-padded rev,
    %N total, %n (padded) sequence number, %s/%d/%p basename/dirname/full
    relative path, %b repository basename, %% a literal percent.
    Raises util.Abort on an unknown escape.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(r.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        # only register the escapes whose inputs are actually available
        if node:
            expander.update(node_expander)
        if node and revwidth is not None:
            expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # when both are known, pad the sequence number to the total's width
            expander['n'] = lambda:str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # consume the character following '%' and expand it
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(_("invalid format spec '%%%s' in output file name"),
                         inst.args[0])
246 246
def make_file(repo, r, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Open the file selected by pat for the given mode.

    An empty pat or '-' selects stdout (write modes) or stdin (read
    modes); an object that already looks like an open file of the right
    direction is returned as-is; otherwise pat is expanded through
    make_filename and opened.
    """
    writing = 'w' in mode
    if not pat or pat == '-':
        if writing:
            return sys.stdout
        return sys.stdin
    if writing and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    name = make_filename(repo, r, pat, node, total, seqno, revwidth, pathname)
    return open(name, mode)
258 258
def dodiff(fp, ui, repo, node1, node2, files=None, match=util.always,
           changes=None, text=False):
    """Write a unified diff between two repository states to fp.

    node2 of None means diff against the working directory; node1 of
    None means the first dirstate parent.  files/match restrict the
    diff; changes may carry a precomputed (changed, added, deleted,
    unknown) tuple to avoid recomputing it here.
    """
    if not changes:
        (c, a, d, u) = repo.changes(node1, node2, files, match=match)
    else:
        (c, a, d, u) = changes
    if files:
        c, a, d = map(lambda x: filterfiles(files, x), (c, a, d))

    # nothing visible changed; emit no diff at all
    if not c and not a and not d:
        return

    if node2:
        # "new" side comes from the node2 manifest
        change = repo.changelog.read(node2)
        mmap2 = repo.manifest.read(change[0])
        date2 = util.datestr(change[2])
        def read(f):
            return repo.file(f).read(mmap2[f])
    else:
        # "new" side comes from the working directory
        date2 = util.datestr()
        if not node1:
            node1 = repo.dirstate.parents()[0]
        def read(f):
            return repo.wfile(f).read()

    if ui.quiet:
        r = None
    else:
        hexfunc = ui.verbose and hex or short
        r = [hexfunc(node) for node in [node1, node2] if node]

    # "old" side always comes from the node1 manifest
    change = repo.changelog.read(node1)
    mmap = repo.manifest.read(change[0])
    date1 = util.datestr(change[2])

    # changed files have both sides, added lack an old version,
    # deleted lack a new one
    for f in c:
        to = None
        if f in mmap:
            to = repo.file(f).read(mmap[f])
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text))
    for f in a:
        to = None
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text))
    for f in d:
        to = repo.file(f).read(mmap[f])
        tn = None
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text))
308 308
def trimuser(ui, name, rev, revcache):
    """trim the name of the user who committed a change"""
    cached = revcache.get(rev)
    if cached is not None:
        return cached
    shortened = ui.shortuser(name)
    revcache[rev] = shortened
    return shortened
315 315
def show_changeset(ui, repo, rev=0, changenode=None, brinfo=None):
    """show a single changeset or file revision"""
    log = repo.changelog
    # accept either a rev number or a changelog node; derive the other
    if changenode is None:
        changenode = log.node(rev)
    elif not rev:
        rev = log.rev(changenode)

    if ui.quiet:
        ui.write("%d:%s\n" % (rev, short(changenode)))
        return

    changes = log.read(changenode)
    date = util.datestr(changes[2])

    parents = [(log.rev(p), ui.verbose and hex(p) or short(p))
               for p in log.parents(changenode)
               if ui.debugflag or p != nullid]
    # suppress the trivial parent (the immediately preceding rev)
    # unless debugging
    if not ui.debugflag and len(parents) == 1 and parents[0][0] == rev-1:
        parents = []

    if ui.verbose:
        ui.write(_("changeset: %d:%s\n") % (rev, hex(changenode)))
    else:
        ui.write(_("changeset: %d:%s\n") % (rev, short(changenode)))

    for tag in repo.nodetags(changenode):
        ui.status(_("tag: %s\n") % tag)
    for parent in parents:
        ui.write(_("parent: %d:%s\n") % parent)

    if brinfo and changenode in brinfo:
        br = brinfo[changenode]
        ui.write(_("branch: %s\n") % " ".join(br))

    ui.debug(_("manifest: %d:%s\n") % (repo.manifest.rev(changes[0]),
                                       hex(changes[0])))
    ui.status(_("user: %s\n") % changes[1])
    ui.status(_("date: %s\n") % date)

    if ui.debugflag:
        # with --debug, split the file list into changed/added/removed
        files = repo.changes(log.parents(changenode)[0], changenode)
        for key, value in zip([_("files:"), _("files+:"), _("files-:")], files):
            if value:
                ui.note("%-12s %s\n" % (key, " ".join(value)))
    else:
        ui.note(_("files: %s\n") % " ".join(changes[3]))

    description = changes[4].strip()
    if description:
        if ui.verbose:
            ui.status(_("description:\n"))
            ui.status(description)
            ui.status("\n\n")
        else:
            ui.status(_("summary: %s\n") % description.splitlines()[0])
    ui.status("\n")
373 373
def show_version(ui):
    """Print the Mercurial version banner and the copyright notice."""
    banner = _("Mercurial Distributed SCM (version %s)\n")
    ui.write(banner % version.get_version())
    notice = _(
        "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    ui.status(notice)
384 384
def help_(ui, cmd=None, with_version=False):
    """show help for a given command or all commands"""
    option_lists = []
    if cmd and cmd != 'shortlist':
        # help for one specific command
        if with_version:
            show_version(ui)
            ui.write('\n')
        aliases, i = find(cmd)
        # synopsis
        ui.write("%s\n\n" % i[2])

        # description
        doc = i[0].__doc__
        if ui.quiet:
            # quiet mode: first line of the docstring only
            doc = doc.splitlines(0)[0]
        ui.write("%s\n" % doc.rstrip())

        if not ui.quiet:
            # aliases
            if len(aliases) > 1:
                ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

            # options
            if i[1]:
                option_lists.append(("options", i[1]))

    else:
        # program name
        if ui.verbose or with_version:
            show_version(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if cmd == "shortlist":
            ui.status(_('basic commands (use "hg help" '
                        'for the full list or option "-v" for details):\n\n'))
        elif ui.verbose:
            ui.status(_('list of commands:\n\n'))
        else:
            ui.status(_('list of commands (use "hg help -v" '
                        'to show aliases and global options):\n\n'))

        # h maps display name -> summary, cmds maps it back to the
        # full "name|alias" table key
        h = {}
        cmds = {}
        for c, e in table.items():
            f = c.split("|")[0]
            # "^" marks the basic commands shown in the short list
            if cmd == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            # hide debug commands unless --debug was given
            if not ui.debugflag and f.startswith("debug"):
                continue
            d = ""
            if e[0].__doc__:
                d = e[0].__doc__.splitlines(0)[0].rstrip()
            h[f] = d
            cmds[f]=c.lstrip("^")

        fns = h.keys()
        fns.sort()
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands,h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

    # global options
    if ui.verbose:
        option_lists.append(("global options", globalopts))

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s:\n" % title, None))
        for shortopt, longopt, default, desc in options:
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                               "%s%s" % (desc,
                                         default and _(" (default: %s)") % default
                                         or "")))

    if opt_output:
        # align descriptions on the widest option column
        opts_len = max([len(line[0]) for line in opt_output if line[1]])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
476 476
477 477 # Commands start here, listed alphabetically
478 478
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the repository.

    The files will be added to the repository at the next commit.

    If no names are given, add all files in the current directory and
    its subdirectories.
    """

    names = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # pattern matches only pick up untracked files; explicit names
        # are always scheduled
        if not exact and repo.dirstate.state(abs) != '?':
            continue
        if exact:
            if ui.verbose:
                ui.status(_('adding %s\n') % rel)
        else:
            ui.status(_('adding %s\n') % rel)
        names.append(abs)
    repo.add(names)
499 499
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the repository.

    New files are ignored if they match any of the patterns in .hgignore. As
    with add, these changes take effect at the next commit.
    """
    added, removed = [], []
    for src, abs, rel, exact in walk(repo, pats, opts):
        chatty = ui.verbose or not exact
        # untracked file found on disk -> schedule for add
        if src == 'f' and repo.dirstate.state(abs) == '?':
            added.append(abs)
            if chatty:
                ui.status(_('adding %s\n') % rel)
        # tracked file gone from disk -> schedule for removal
        if repo.dirstate.state(abs) != 'r' and not os.path.exists(rel):
            removed.append(abs)
            if chatty:
                ui.status(_('removing %s\n') % rel)
    repo.add(added)
    repo.remove(removed)
520 520
def annotate(ui, repo, *pats, **opts):
    """show changeset information per file line

    List changes in files, showing the revision id responsible for each line

    This command is useful to discover who did a change or when a change took
    place.

    Without the -a option, annotate will avoid processing files it
    detects as binary. With -a, annotate will generate an annotation
    anyway, probably with undesirable results.
    """
    # formatters for the optional annotation columns
    def getnode(rev):
        return short(repo.changelog.node(rev))

    ucache = {}
    def getname(rev):
        cl = repo.changelog.read(repo.changelog.node(rev))
        return trimuser(ui, cl[1], rev, ucache)

    dcache = {}
    def getdate(rev):
        datestr = dcache.get(rev)
        if datestr is None:
            cl = repo.changelog.read(repo.changelog.node(rev))
            datestr = dcache[rev] = util.datestr(cl[2])
        return datestr

    if not pats:
        raise util.Abort(_('at least one file name or pattern required'))

    opmap = [['user', getname], ['number', str], ['changeset', getnode],
             ['date', getdate]]
    # default to the revision-number column when none was requested
    if not opts['user'] and not opts['changeset'] and not opts['date']:
        opts['number'] = 1

    # annotate against -r REV, or the first working-directory parent
    if opts['rev']:
        node = repo.changelog.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]
    change = repo.changelog.read(node)
    mmap = repo.manifest.read(change[0])

    for src, abs, rel, exact in walk(repo, pats, opts):
        if abs not in mmap:
            ui.warn(_("warning: %s is not in the repository!\n") % rel)
            continue

        f = repo.file(abs)
        # skip binaries unless -a/--text was given
        if not opts['text'] and util.binary(f.read(mmap[abs])):
            ui.write(_("%s: binary file\n") % rel)
            continue

        lines = f.annotate(mmap[abs])
        pieces = []

        # build one right-aligned column per requested field
        for o, f in opmap:
            if opts[o]:
                l = [f(n) for n, dummy in lines]
                if l:
                    m = max(map(len, l))
                    pieces.append(["%*s" % (m, x) for x in l])

        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
587 587
def bundle(ui, repo, fname, dest="default-push", **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting all changesets
    not found in the other repository.

    This file can then be transferred using conventional means and
    applied to another repository with the unbundle command. This is
    useful when native push and pull are not available or when
    exporting an entire repository is undesirable. The standard file
    extension is ".hg".

    Unlike import/export, this exactly preserves all changeset
    contents including permissions, rename data, and revision history.
    """
    f = open(fname, "wb")
    dest = ui.expandpath(dest, repo.root)
    other = hg.repository(ui, dest)
    o = repo.findoutgoing(other)
    cg = repo.changegroup(o)

    try:
        # "HG10" magic, then the bz2-compressed changegroup stream
        f.write("HG10")
        z = bz2.BZ2Compressor(9)
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            f.write(z.compress(chunk))
        f.write(z.flush())
        # close explicitly: previously the handle was never closed on
        # success, and the unlink below can fail on platforms that
        # refuse to remove an open file
        f.close()
    except:
        # clean up the partial bundle, then re-raise
        f.close()
        os.unlink(fname)
        raise
621 621
def cat(ui, repo, file1, *pats, **opts):
    """output the latest or given revisions of files

    Print the specified files as they were at the given revision.
    If no revision is given then the tip is used.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repo root
    %p root-relative path name of file being printed
    """
    mf = {}
    rev = opts['rev']
    if rev:
        change = repo.changelog.read(repo.lookup(rev))
        mf = repo.manifest.read(change[0])
    for src, abs, rel, exact in walk(repo, (file1,) + pats, opts):
        r = repo.file(abs)
        if rev:
            try:
                # prefer the file node recorded in the changeset manifest
                n = mf[abs]
            except (hg.RepoError, KeyError):
                # fall back to looking rev up in the filelog itself
                try:
                    n = r.lookup(rev)
                except KeyError, inst:
                    raise util.Abort(_('cannot find file %s in rev %s'), rel, rev)
        else:
            n = r.tip()
        fp = make_file(repo, r, opts['output'], node=n, pathname=abs)
        fp.write(r.read(n))
655 655
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem. Some filesystems,
    such as AFS, implement hardlinking incorrectly, but do not report
    errors. In these cases, use the --pull option to avoid
    hardlinking.
    """
    if dest is None:
        dest = os.path.basename(os.path.normpath(source))

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists"), dest)

    dest = os.path.realpath(dest)

    class Dircleanup:
        # removes the partially-created destination directory if the
        # clone fails before close() is called
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
            os.mkdir(dir_)
        def close(self):
            self.dir_ = None
        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    if not os.path.exists(source):
        source = ui.expandpath(source)

    d = Dircleanup(dest)
    abspath = source
    other = hg.repository(ui, source)

    # direct file copy only works for a local source, and only when no
    # pull/rev options were given
    copy = False
    if other.dev() != -1:
        abspath = os.path.abspath(source)
        if not opts['pull'] and not opts['rev']:
            copy = True

    if copy:
        try:
            # we use a lock here because if we race with commit, we
            # can end up with extra data in the cloned revlogs that's
            # not pointed to by changesets, thus causing verify to
            # fail
            l1 = lock.lock(os.path.join(source, ".hg", "lock"))
        except OSError:
            # cannot lock the source; fall back to a pull-based clone
            copy = False

    if copy:
        # we lock here to avoid premature writing to the target
        os.mkdir(os.path.join(dest, ".hg"))
        l2 = lock.lock(os.path.join(dest, ".hg", "lock"))

        files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
        for f in files.split():
            src = os.path.join(source, ".hg", f)
            dst = os.path.join(dest, ".hg", f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                # some of these files are optional; a missing one is fine
                if inst.errno != errno.ENOENT: raise

        repo = hg.repository(ui, dest)

    else:
        # pull-based clone into a freshly-created repository
        revs = None
        if opts['rev']:
            if not other.local():
                raise util.Abort("clone -r not supported yet for remote repositories.")
            else:
                revs = [other.lookup(rev) for rev in opts['rev']]
        repo = hg.repository(ui, dest, create=1)
        repo.pull(other, heads = revs)

    # record the source as the default path for future pulls
    f = repo.opener("hgrc", "w", text=True)
    f.write("[paths]\n")
    f.write("default = %s\n" % abspath)
    f.close()

    if not opts['noupdate']:
        update(ui, repo)

    d.close()
755 755
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    from the root of the repository will be commited.

    The HGEDITOR or EDITOR environment variables are used to start an
    editor to add a commit comment.
    """
    message = opts['message']
    logfile = opts['logfile']

    # -m and -l both supply the message; accepting both would be ambiguous
    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                message = sys.stdin.read()
            else:
                message = open(logfile).read()
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))

    if opts['addremove']:
        addremove(ui, repo, *pats, **opts)
    cwd = repo.getcwd()
    # anchor include/exclude patterns at the current directory when no
    # explicit file patterns were given
    if not pats and cwd:
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
    fns, match, anypats = matchpats(repo, (pats and repo.getcwd()) or '',
                                    pats, opts)
    if pats:
        # restrict the commit to the changed/added files plus those
        # explicitly marked for removal
        c, a, d, u = repo.changes(files=fns, match=match)
        files = c + a + [fn for fn in d if repo.dirstate.state(fn) == 'r']
    else:
        files = []
    try:
        repo.commit(files, message, opts['user'], opts['date'], match)
    except ValueError, inst:
        raise util.Abort(str(inst))
800 800
801 801 def docopy(ui, repo, pats, opts):
802 802 cwd = repo.getcwd()
803 803 errors = 0
804 804 copied = []
805 805 targets = {}
806 806
807 807 def okaytocopy(abs, rel, exact):
808 808 reasons = {'?': _('is not managed'),
809 809 'a': _('has been marked for add')}
810 810 reason = reasons.get(repo.dirstate.state(abs))
811 811 if reason:
812 812 if exact: ui.warn(_('%s: not copying - file %s\n') % (rel, reason))
813 813 else:
814 814 return True
815 815
816 816 def copy(abssrc, relsrc, target, exact):
817 817 abstarget = util.canonpath(repo.root, cwd, target)
818 818 reltarget = util.pathto(cwd, abstarget)
819 819 prevsrc = targets.get(abstarget)
820 820 if prevsrc is not None:
821 821 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
822 822 (reltarget, abssrc, prevsrc))
823 823 return
824 824 elif os.path.exists(reltarget):
825 825 if opts['force']:
826 826 os.unlink(reltarget)
827 827 else:
828 828 ui.warn(_('%s: not overwriting - file exists\n') %
829 829 reltarget)
830 830 return
831 831 if ui.verbose or not exact:
832 832 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
833 833 if not opts['after']:
834 834 targetdir = os.path.dirname(reltarget) or '.'
835 835 if not os.path.isdir(targetdir):
836 836 os.makedirs(targetdir)
837 837 try:
838 838 shutil.copyfile(relsrc, reltarget)
839 839 shutil.copymode(relsrc, reltarget)
840 840 except shutil.Error, inst:
841 841 raise util.Abort(str(inst))
842 842 except IOError, inst:
843 843 if inst.errno == errno.ENOENT:
844 844 ui.warn(_('%s: deleted in working copy\n') % relsrc)
845 845 else:
846 846 ui.warn(_('%s: cannot copy - %s\n') %
847 847 (relsrc, inst.strerror))
848 848 errors += 1
849 849 return
850 850 targets[abstarget] = abssrc
851 851 repo.copy(abssrc, abstarget)
852 852 copied.append((abssrc, relsrc, exact))
853 853
854 854 pats = list(pats)
855 855 if not pats:
856 856 raise util.Abort(_('no source or destination specified'))
857 857 if len(pats) == 1:
858 858 raise util.Abort(_('no destination specified'))
859 859 dest = pats.pop()
860 860 destdirexists = os.path.isdir(dest)
861 861 if (len(pats) > 1 or not os.path.exists(pats[0])) and not destdirexists:
862 862 raise util.Abort(_('with multiple sources, destination must be an '
863 863 'existing directory'))
864 864
865 865 for pat in pats:
866 866 if os.path.isdir(pat):
867 867 if destdirexists:
868 868 striplen = len(os.path.split(pat)[0])
869 869 else:
870 870 striplen = len(pat)
871 871 if striplen:
872 872 striplen += len(os.sep)
873 873 targetpath = lambda p: os.path.join(dest, p[striplen:])
874 874 elif destdirexists:
875 875 targetpath = lambda p: os.path.join(dest, os.path.basename(p))
876 876 else:
877 877 targetpath = lambda p: dest
878 878 for tag, abssrc, relsrc, exact in walk(repo, [pat], opts):
879 879 if okaytocopy(abssrc, relsrc, exact):
880 880 copy(abssrc, relsrc, targetpath(abssrc), exact)
881 881
882 882 if errors:
883 883 ui.warn(_('(consider using --after)\n'))
884 884 if len(copied) == 0:
885 885 raise util.Abort(_('no files to copy'))
886 886 return errors, copied
887 887
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record copied files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    errcount, dummy = docopy(ui, repo, pats, opts)
    return errcount
907 907
def debugancestor(ui, index, rev1, rev2):
    """find the ancestor revision of two revisions in a given index"""
    log = revlog.revlog(util.opener(os.getcwd()), index, "")
    node = log.ancestor(log.lookup(rev1), log.lookup(rev2))
    ui.write("%d:%s\n" % (log.rev(node), hex(node)))
913 913
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    m1n = repo.changelog.read(parent1)[0]
    m2n = repo.changelog.read(parent2)[0]
    m1 = repo.manifest.read(m1n)
    m2 = repo.manifest.read(m2n)
    errors = 0
    # cross-check every dirstate entry against the parent manifests
    for f in dc:
        state = repo.dirstate.state(f)
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # and the reverse: every file in manifest1 must be tracked
    for f in m1:
        state = repo.dirstate.state(f)
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        raise util.Abort(_(".hg/dirstate inconsistent with current parent's manifest"))
945 945
def debugconfig(ui):
    """show combined config settings from all hgrc files"""
    # Opening the repository (if we are inside one) pulls .hg/hgrc settings
    # into ui as a side effect; failure outside a repo is harmless.
    try:
        hg.repository(ui)
    except hg.RepoError:
        pass
    for section, name, value in ui.walkconfig():
        ui.write('%s.%s=%s\n' % (section, name, value))
954 954
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    # A missing second parent defaults to the null revision.
    second = rev2 or hex(nullid)
    repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(second))
966 966
def debugstate(ui, repo):
    """show the contents of the current dirstate"""
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    # Each entry is (state, mode, size, mtime); print state letter,
    # permission bits (octal), size, local mtime, then the file name.
    for file_ in keys:
        ui.write("%c %3o %10d %s %s\n"
                 % (dc[file_][0], dc[file_][1] & 0777, dc[file_][2],
                    time.strftime("%x %X",
                                  time.localtime(dc[file_][3])), file_))
    # Copy records map destination -> source; print source -> destination.
    for f in repo.dirstate.copies:
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copies[f], f))
980 980
def debugdata(ui, file_, rev):
    """dump the contents of an data file revision"""
    # Derive the index name from the data file name (".d" -> ".i").
    rlog = revlog.revlog(util.opener(os.getcwd()), file_[:-2] + ".i", file_)
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s'), rev)
988 988
def debugindex(ui, file_):
    """dump the contents of an index file"""
    rlog = revlog.revlog(util.opener(os.getcwd()), file_, "")
    ui.write("   rev    offset  length   base linkrev" +
             " nodeid       p1           p2\n")
    # Index entries: (offset, length, base, linkrev, p1, p2, nodeid).
    for rev in range(rlog.count()):
        entry = rlog.index[rev]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                rev, entry[0], entry[1], entry[2], entry[3],
                short(entry[6]), short(entry[4]), short(entry[5])))
999 999
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    rlog = revlog.revlog(util.opener(os.getcwd()), file_, "")
    ui.write("digraph G {\n")
    # One edge per parent link; the null parent is omitted.
    for rev in range(rlog.count()):
        entry = rlog.index[rev]
        ui.write("\t%d -> %d\n" % (rlog.rev(entry[4]), rev))
        if entry[5] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(entry[5]), rev))
    ui.write("}\n")
1010 1010
def debugrename(ui, repo, file, rev=None):
    """dump rename information"""
    r = repo.file(relpath(repo, [file])[0])
    if rev:
        try:
            # assume all revision numbers are for changesets
            n = repo.lookup(rev)
            change = repo.changelog.read(n)
            m = repo.manifest.read(change[0])
            n = m[relpath(repo, [file])[0]]
        except (hg.RepoError, KeyError):
            # fall back to treating rev as a filelog revision
            n = r.lookup(rev)
    else:
        n = r.tip()
    # renamed() returns (source path, source node) or a false value.
    m = r.renamed(n)
    if m:
        ui.write(_("renamed from %s:%s\n") % (m[0], hex(m[1])))
    else:
        ui.write(_("not renamed\n"))
1030 1030
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matched = list(walk(repo, pats, opts))
    if not matched:
        return
    # Column widths sized to the longest absolute and relative names.
    abs_width = max([len(abs) for (src, abs, rel, exact) in matched])
    rel_width = max([len(rel) for (src, abs, rel, exact) in matched])
    fmt = '%%s  %%-%ds  %%-%ds  %%s' % (abs_width, rel_width)
    for src, abs, rel, exact in matched:
        row = fmt % (src, abs, rel, exact and 'exact' or '')
        ui.write("%s\n" % row.rstrip())
1042 1042
def diff(ui, repo, *pats, **opts):
    """diff working directory (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.
    """
    node1, node2 = None, None
    # -r may appear zero, one or two times; anything more is an error.
    revs = [repo.lookup(x) for x in opts['rev']]

    if len(revs) > 0:
        node1 = revs[0]
    if len(revs) > 1:
        node2 = revs[1]
    if len(revs) > 2:
        raise util.Abort(_("too many revisions to diff"))

    fns, matchfn, anypats = matchpats(repo, repo.getcwd(), pats, opts)

    # node1=None compares against the working directory parent;
    # node2=None compares against the working directory itself.
    dodiff(sys.stdout, ui, repo, node1, node2, fns, match=matchfn,
           text=opts['text'])
1074 1074
def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
    # Helper for export: write the header and diff of one changeset to
    # the file chosen by make_file from the -o format string (or stdout).
    node = repo.lookup(changeset)
    prev, other = repo.changelog.parents(node)
    change = repo.changelog.read(node)

    fp = make_file(repo, repo.changelog, opts['output'],
                   node=node, total=total, seqno=seqno,
                   revwidth=revwidth)
    if fp != sys.stdout:
        ui.note("%s\n" % fp.name)

    # Header in "hg export" format, parseable by "hg import".
    fp.write("# HG changeset patch\n")
    fp.write("# User %s\n" % change[1])
    fp.write("# Node ID %s\n" % hex(node))
    fp.write("# Parent  %s\n" % hex(prev))
    # Second parent line only for merge changesets.
    if other != nullid:
        fp.write("# Parent  %s\n" % hex(other))
    fp.write(change[4].rstrip())
    fp.write("\n\n")

    # The diff is always taken against the first parent.
    dodiff(fp, ui, repo, prev, node, text=opts['text'])
    if fp != sys.stdout:
        fp.close()
1098 1098
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent and commit comment.

    Output may be to a file, in which case the name of the file is
    given using a format string.  The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    seqno = 0
    revs = list(revrange(ui, repo, changesets))
    total = len(revs)
    # Widest revision string, used to zero-pad %r in output file names.
    revwidth = max(map(len, revs))
    ui.note(len(revs) > 1 and _("Exporting patches:\n") or _("Exporting patch:\n"))
    for cset in revs:
        seqno += 1
        doexport(ui, repo, cset, seqno, total, revwidth, opts)
1133 1133
def forget(ui, repo, *pats, **opts):
    """don't add the specified files on the next commit

    Undo an 'hg add' scheduled for the next commit.
    """
    to_forget = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # Only files in state 'a' (scheduled for add) can be forgotten.
        if repo.dirstate.state(abs) != 'a':
            continue
        to_forget.append(abs)
        if ui.verbose or not exact:
            ui.status(_('forgetting %s\n') % rel)
    repo.forget(to_forget)
1146 1146
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep.  It only accepts
    Python/Perl regexps.  It searches repository history, not the
    working directory.  It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match.  To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts['ignore_case']:
        reflags |= re.I
    regexp = re.compile(pattern, reflags)
    # --print0 makes output NUL-separated for consumption by xargs -0.
    sep, eol = ':', '\n'
    if opts['print0']:
        sep = eol = '\0'

    # Cache filelog objects so each file is opened at most once.
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # Yield (line number, match start col, match end col, line text)
        # for every regexp match in body.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            lend = body.find('\n', mend)
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
            begin = lend + 1

    class linestate:
        # A matched line; equality/hash are on the text only so match
        # sets can be diffed between revisions.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __eq__(self, other):
            return self.line == other.line
        def __hash__(self):
            return hash(self.line)

    # matches[rev][filename] -> {linestate: linestate}
    matches = {}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, {})
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m[s] = s

    prev = {}
    ucache = {}
    def display(fn, rev, states, prevstates):
        # Print lines whose match status changed between prevstates and
        # states; returns the (+, -) change counts.
        diff = list(sets.Set(states).symmetric_difference(sets.Set(prevstates)))
        diff.sort(lambda x, y: cmp(x.linenum, y.linenum))
        counts = {'-': 0, '+': 0}
        filerevmatches = {}
        for l in diff:
            if incrementing or not opts['all']:
                change = ((l in prevstates) and '-') or '+'
                r = rev
            else:
                change = ((l in states) and '-') or '+'
                r = prev[fn]
            cols = [fn, str(rev)]
            if opts['line_number']: cols.append(str(l.linenum))
            if opts['all']: cols.append(change)
            if opts['user']: cols.append(trimuser(ui, getchange(rev)[1], rev,
                                                  ucache))
            if opts['files_with_matches']:
                c = (fn, rev)
                # Print each (file, rev) pair at most once with -l.
                if c in filerevmatches: continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            counts[change] += 1
        return counts['+'], counts['-']

    fstate = {}
    skip = {}
    # walkchangerevs yields windows of revisions; 'window' announces a
    # new batch (rev tells the walk direction), 'add' collects matches,
    # 'iter' replays revisions in the window for display.
    changeiter, getchange = walkchangerevs(ui, repo, repo.getcwd(), pats, opts)
    count = 0
    incrementing = False
    for st, rev, fns in changeiter:
        if st == 'window':
            incrementing = rev
            matches.clear()
        elif st == 'add':
            change = repo.changelog.read(repo.lookup(str(rev)))
            mf = repo.manifest.read(change[0])
            matches[rev] = {}
            for fn in fns:
                if fn in skip: continue
                fstate.setdefault(fn, {})
                try:
                    grepbody(fn, rev, getfile(fn).read(mf[fn]))
                except KeyError:
                    # file not present in this revision's manifest
                    pass
        elif st == 'iter':
            states = matches[rev].items()
            states.sort()
            for fn, m in states:
                if fn in skip: continue
                if incrementing or not opts['all'] or fstate[fn]:
                    pos, neg = display(fn, rev, m, fstate[fn])
                    count += pos + neg
                    if pos and not opts['all']:
                        # without --all, stop after the first match
                        skip[fn] = True
                fstate[fn] = m
                prev[fn] = rev

    if not incrementing:
        # Walking backwards: flush the remaining per-file state.
        fstate = fstate.items()
        fstate.sort()
        for fn, state in fstate:
            if fn in skip: continue
            display(fn, rev, {}, state)
    # Exit status 1 when nothing matched, like Unix grep.
    return (count == 0 and 1) or 0
1279 1279
def heads(ui, repo, **opts):
    """show current repository heads

    Show all repository head changesets.

    Repository "heads" are changesets that don't have children
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.

    If a revision is given with -r/--rev, only heads that are
    descendants of that revision are shown.
    """
    if opts['rev']:
        # Bug fix: the revision string lives in opts['rev']; the original
        # referenced an undefined local name 'rev' and raised NameError
        # whenever -r was used.
        heads = repo.heads(repo.lookup(opts['rev']))
    else:
        heads = repo.heads()
    br = None
    if opts['branches']:
        br = repo.branchlookup(list(heads))
    for n in heads:
        show_changeset(ui, repo, changenode=n, brinfo=br)
1295 1298
def identify(ui, repo):
    """print information about the working copy

    Print a short summary of the current state of the repo.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, followed by a list of tags for this revision.
    """
    parents = [p for p in repo.dirstate.parents() if p != nullid]
    if not parents:
        # No parents at all: fresh repository with no checkout.
        ui.write(_("unknown\n"))
        return

    hexfunc = ui.verbose and hex or short
    # (changed, added, deleted, unknown) relative to the dirstate parents.
    (c, a, d, u) = repo.changes()
    output = ["%s%s" % ('+'.join([hexfunc(parent) for parent in parents]),
                        (c or a or d) and "+" or "")]

    if not ui.quiet:
        # multiple tags for a single parent separated by '/'
        parenttags = ['/'.join(tags)
                      for tags in map(repo.nodetags, parents) if tags]
        # tags for multiple parents separated by ' + '
        if parenttags:
            output.append(' + '.join(parenttags))

    ui.write("%s\n" % ' '.join(output))
1324 1327
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    If a patch looks like a mail message (its first line starts with
    "From " or looks like an RFC822 header), it will not be applied
    unless the -f option is used.  The importer neither parses nor
    discards mail headers, so use -f only to override the "mailness"
    safety check, not to import a real mail message.
    """
    patches = (patch1,) + patches

    if not opts['force']:
        (c, a, d, u) = repo.changes()
        if c or a or d:
            raise util.Abort(_("outstanding uncommitted changes"))

    d = opts["base"]
    strip = opts["strip"]

    # Lines that look like mail headers (triggers the "mailness" check).
    mailre = re.compile(r'(?:From |[\w-]+:)')

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'(?:Index:[ \t]|diff[ \t]|RCS file: |' +
                        'retrieving revision [0-9]+(\.[0-9]+)*$|' +
                        '(---|\*\*\*)[ \t])')

    for patch in patches:
        ui.status(_("applying %s\n") % patch)
        pf = os.path.join(d, patch)

        message = []
        user = None
        hgpatch = False
        # Scan the header portion of the patch up to the first diff line,
        # collecting the commit message and (for hg export patches) the user.
        for line in file(pf):
            line = line.rstrip()
            if (not message and not hgpatch and
                   mailre.match(line) and not opts['force']):
                if len(line) > 35: line = line[:32] + '...'
                raise util.Abort(_('first line looks like a '
                                   'mail header: ') + line)
            if diffre.match(line):
                break
            elif hgpatch:
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                    ui.debug(_('User: %s\n') % user)
                elif not line.startswith("# ") and line:
                    message.append(line)
                    hgpatch = False
            elif line == '# HG changeset patch':
                hgpatch = True
                message = []    # We may have collected garbage
            else:
                message.append(line)

        # make sure message isn't empty
        if not message:
            message = _("imported patch %s\n") % patch
        else:
            message = "%s\n" % '\n'.join(message)
        ui.debug(_('message:\n%s\n') % message)

        files = util.patch(strip, pf, ui)

        if len(files) > 0:
            addremove(ui, repo, *files)
        repo.commit(files, message, user)
1399 1402
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified repo or the default
    pull repo. These are the changesets that would be pulled if a pull
    was requested.

    Currently only local repositories are supported.
    """
    source = ui.expandpath(source, repo.root)
    other = hg.repository(ui, source)
    if not other.local():
        raise util.Abort(_("incoming doesn't work for remote repositories yet"))
    # Roots of the incoming changesets; empty means nothing new.
    o = repo.findincoming(other)
    if not o:
        return
    o = other.changelog.nodesbetween(o)[0]
    if opts['newest_first']:
        o.reverse()
    for n in o:
        parents = [p for p in other.changelog.parents(n) if p != nullid]
        # Two real parents means a merge changeset.
        if opts['no_merges'] and len(parents) == 2:
            continue
        show_changeset(ui, other, changenode=n)
        if opts['patch']:
            prev = (parents and parents[0]) or nullid
            dodiff(ui, ui, other, prev, n)
            ui.write("\n")
1428 1431
def init(ui, dest="."):
    """create a new repository in the given directory

    Initialize a new repository in the given directory.  If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.
    """
    # Create the target directory first so hg.repository can populate it.
    if not os.path.exists(dest):
        os.mkdir(dest)
    hg.repository(ui, dest, create=1)
1440 1443
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the current directory and its
    subdirectories.  To search an entire repository, move to the root
    of the repository.

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    end = opts['print0'] and '\0' or '\n'

    # The '(?:.*/|)' head makes bare patterns match anywhere in the tree.
    for src, abs, rel, exact in walk(repo, pats, opts, '(?:.*/|)'):
        # '?' means the file is not tracked; skip it.
        if repo.dirstate.state(abs) == '?':
            continue
        if opts['fullpath']:
            ui.write(os.path.join(repo.root, abs), end)
        else:
            ui.write(rel, end)
1468 1471
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire project.

    By default this command outputs: changeset id and hash, tags,
    parents, user, date and time, and a summary for each commit. The
    -v switch adds some more detail, such as changed files, manifest
    hashes or message signatures.
    """
    class dui:
        # Implement and delegate some ui protocol.  Save hunks of
        # output for later display in the desired order.
        def __init__(self, ui):
            self.ui = ui
            self.hunk = {}
        def bump(self, rev):
            self.rev = rev
            self.hunk[rev] = []
        def note(self, *args):
            if self.verbose:
                self.write(*args)
        def status(self, *args):
            if not self.quiet:
                self.write(*args)
        def write(self, *args):
            self.hunk[self.rev].append(args)
        def debug(self, *args):
            if self.debugflag:
                self.write(*args)
        def __getattr__(self, key):
            # Everything else is delegated to the real ui.
            return getattr(self.ui, key)
    cwd = repo.getcwd()
    if not pats and cwd:
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
    changeiter, getchange = walkchangerevs(ui, repo, (pats and cwd) or '',
                                           pats, opts)
    # 'window' starts a new output buffer, 'add' renders one revision
    # into it, 'iter' flushes the buffered hunks in display order.
    for st, rev, fns in changeiter:
        if st == 'window':
            du = dui(ui)
        elif st == 'add':
            du.bump(rev)
            changenode = repo.changelog.node(rev)
            parents = [p for p in repo.changelog.parents(changenode)
                       if p != nullid]
            if opts['no_merges'] and len(parents) == 2:
                continue
            if opts['only_merges'] and len(parents) != 2:
                continue

            br = None
            if opts['keyword']:
                # -k: keep only revisions whose user, description or
                # first files mention every keyword (case-insensitive).
                changes = repo.changelog.read(repo.changelog.node(rev))
                miss = 0
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            if opts['branch']:
                br = repo.branchlookup([repo.changelog.node(rev)])

            show_changeset(du, repo, rev, brinfo=br)
            if opts['patch']:
                prev = (parents and parents[0]) or nullid
                dodiff(du, du, repo, prev, changenode, fns)
                du.write("\n\n")
        elif st == 'iter':
            for args in du.hunk[rev]:
                ui.write(*args)
1544 1547
def manifest(ui, repo, rev=None):
    """output the latest or given revision of the project manifest

    Print a list of version controlled files for the given revision.

    The manifest is the list of files being version controlled. If no revision
    is given then the tip is used.
    """
    if rev:
        try:
            # assume all revision numbers are for changesets
            n = repo.lookup(rev)
            change = repo.changelog.read(n)
            n = change[0]
        except hg.RepoError:
            # fall back to treating rev as a manifest revision
            n = repo.manifest.lookup(rev)
    else:
        n = repo.manifest.tip()
    m = repo.manifest.read(n)
    mf = repo.manifest.readflags(n)
    files = m.keys()
    files.sort()

    # One line per file: node hash, mode (755 if executable, else 644), name.
    for f in files:
        ui.write("%40s %3s %s\n" % (hex(m[f]), mf[f] and "755" or "644", f))
1570 1573
def outgoing(ui, repo, dest="default-push", **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repo or the
    default push repo. These are the changesets that would be pushed
    if a push was requested.
    """
    dest = ui.expandpath(dest, repo.root)
    other = hg.repository(ui, dest)
    # Expand the outgoing roots into the full list of missing changesets.
    o = repo.findoutgoing(other)
    o = repo.changelog.nodesbetween(o)[0]
    if opts['newest_first']:
        o.reverse()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        # Two real parents means a merge changeset.
        if opts['no_merges'] and len(parents) == 2:
            continue
        show_changeset(ui, repo, changenode=n)
        if opts['patch']:
            prev = (parents and parents[0]) or nullid
            dodiff(ui, ui, repo, prev, n)
            ui.write("\n")
1593 1596
def parents(ui, repo, rev=None):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions.
    """
    # With a revision, show that changeset's parents; otherwise show the
    # parents of the working directory.
    if rev:
        parent_nodes = repo.changelog.parents(repo.lookup(rev))
    else:
        parent_nodes = repo.dirstate.parents()

    for node in parent_nodes:
        if node != nullid:
            show_changeset(ui, repo, changenode=node)
1607 1610
def paths(ui, search=None):
    """show definition of symbolic path names

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    """
    # Opening a repository (if any) merges .hg/hgrc into ui's config.
    try:
        hg.repository(ui=ui)
    except hg.RepoError:
        pass

    if search:
        for name, path in ui.configitems("paths"):
            if name == search:
                ui.write("%s\n" % path)
                return
        ui.warn(_("not found!\n"))
        return 1
    for name, path in ui.configitems("paths"):
        ui.write("%s = %s\n" % (name, path))
1632 1635
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Valid URLs are of the form:

      local/filesystem/path
      http://[user@]host[:port][/path]
      https://[user@]host[:port][/path]
      ssh://[user@]host[:port][/path]

    SSH requires an accessible shell account on the destination machine
    and a copy of hg in the remote path.  With SSH, paths are relative
    to the remote user's home directory by default; use two slashes at
    the start of a path to specify it as relative to the filesystem root.
    """
    source = ui.expandpath(source, repo.root)
    ui.status(_('pulling from %s\n') % (source))

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, source)
    revs = None
    if opts['rev'] and not other.local():
        # Consistency fix: mark the message for translation with _()
        # like every other user-visible message in this module.
        raise util.Abort(_("pull -r doesn't work for remote repositories yet"))
    elif opts['rev']:
        revs = [other.lookup(rev) for rev in opts['rev']]
    r = repo.pull(other, heads=revs)
    if not r:
        if opts['update']:
            return update(ui, repo)
        else:
            ui.status(_("(run 'hg update' to get a working copy)\n"))

    return r
1676 1679
def push(ui, repo, dest="default-push", force=False, ssh=None, remotecmd=None):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    the client has forgotten to sync and merge before pushing.

    Valid URLs are of the form:

      local/filesystem/path
      ssh://[user@]host[:port][/path]

    SSH requires an accessible shell account on the destination
    machine and a copy of hg in the remote path.
    """
    dest = ui.expandpath(dest, repo.root)
    # Consistency fix: mark the status message for translation with _(),
    # matching the sibling pull command.
    ui.status(_('pushing to %s\n') % (dest))

    if ssh:
        ui.setconfig("ui", "ssh", ssh)
    if remotecmd:
        ui.setconfig("ui", "remotecmd", remotecmd)

    other = hg.repository(ui, dest)
    r = repo.push(other, force)
    return r
1710 1713
def rawcommit(ui, repo, *flist, **rc):
    """raw commit interface

    Lowlevel commit, for use in helper scripts.

    This command is not intended to be used by normal users, as it is
    primarily useful for importing from other SCMs.
    """
    message = rc['message']
    if not message and rc['logfile']:
        try:
            message = open(rc['logfile']).read()
        except IOError:
            # unreadable logfile: fall through to the missing-message check
            pass
    if not message and not rc['logfile']:
        raise util.Abort(_("missing commit message"))

    files = relpath(repo, list(flist))
    if rc['files']:
        # --files: additional file names, one per line.
        files += open(rc['files']).read().splitlines()

    rc['parent'] = map(repo.lookup, rc['parent'])

    try:
        repo.rawcommit(files, message, rc['user'], rc['date'], *rc['parent'])
    except ValueError, inst:
        raise util.Abort(str(inst))
1738 1741
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # Verify the repository only if something was actually recovered.
    if not repo.recover():
        return False
    return repo.verify()
1750 1753
def remove(ui, repo, pat, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This command schedules the files to be removed at the next commit.
    This only removes files from the current branch, not from the
    entire project history.  If the files still exist in the working
    directory, they will be deleted from it.
    """
    names = []
    def okaytoremove(abs, rel, exact):
        # Refuse files that are modified, added, or not tracked at all.
        c, a, d, u = repo.changes(files = [abs])
        reason = None
        if c: reason = _('is modified')
        elif a: reason = _('has been marked for add')
        elif u: reason = _('is not managed')
        if reason:
            # Only complain for files the user named explicitly.
            if exact: ui.warn(_('not removing %s: file %s\n') % (rel, reason))
        else:
            return True
    for src, abs, rel, exact in walk(repo, (pat,) + pats, opts):
        if okaytoremove(abs, rel, exact):
            if ui.verbose or not exact: ui.status(_('removing %s\n') % rel)
            names.append(abs)
    # unlink=True also deletes the files from the working directory.
    repo.remove(names, unlink=True)
1777 1780
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record rename files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    # First record the copies, then schedule the sources for removal.
    errs, copied = docopy(ui, repo, pats, opts)
    removals = []
    for abs, rel, exact in copied:
        if ui.verbose or not exact:
            ui.status(_('removing %s\n') % rel)
        removals.append(abs)
    repo.remove(removals, unlink=True)
    return errs
1802 1805
def revert(ui, repo, *pats, **opts):
    """revert modified files or dirs back to their unmodified states

    Revert any uncommitted modifications made to the named files or
    directories. This restores the contents of the affected files to
    an unmodified state.

    If a file has been deleted, it is recreated. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.

    If no names are given, all files in the current directory and
    its subdirectories are reverted.
    """
    # Revert relative to -r REV when given, else to the first parent
    # of the working directory.
    if opts['rev']:
        node = repo.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]

    files, choose, anypats = matchpats(repo, repo.getcwd(), pats, opts)
    c, a, d, u = repo.changes(match=choose)
    # forget pending adds and resurrect pending deletes before updating
    repo.forget(a)
    repo.undelete(d)

    return repo.update(node, False, True, choose, False)
1827 1830
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
1834 1837
1835 1838 def serve(ui, repo, **opts):
1836 1839 """export the repository via HTTP
1837 1840
1838 1841 Start a local HTTP repository browser and pull server.
1839 1842
1840 1843 By default, the server logs accesses to stdout and errors to
1841 1844 stderr. Use the "-A" and "-E" options to log to files.
1842 1845 """
1843 1846
1844 1847 if opts["stdio"]:
1845 1848 fin, fout = sys.stdin, sys.stdout
1846 1849 sys.stdout = sys.stderr
1847 1850
1848 1851 # Prevent insertion/deletion of CRs
1849 1852 util.set_binary(fin)
1850 1853 util.set_binary(fout)
1851 1854
1852 1855 def getarg():
1853 1856 argline = fin.readline()[:-1]
1854 1857 arg, l = argline.split()
1855 1858 val = fin.read(int(l))
1856 1859 return arg, val
1857 1860 def respond(v):
1858 1861 fout.write("%d\n" % len(v))
1859 1862 fout.write(v)
1860 1863 fout.flush()
1861 1864
1862 1865 lock = None
1863 1866
1864 1867 while 1:
1865 1868 cmd = fin.readline()[:-1]
1866 1869 if cmd == '':
1867 1870 return
1868 1871 if cmd == "heads":
1869 1872 h = repo.heads()
1870 1873 respond(" ".join(map(hex, h)) + "\n")
1871 1874 if cmd == "lock":
1872 1875 lock = repo.lock()
1873 1876 respond("")
1874 1877 if cmd == "unlock":
1875 1878 if lock:
1876 1879 lock.release()
1877 1880 lock = None
1878 1881 respond("")
1879 1882 elif cmd == "branches":
1880 1883 arg, nodes = getarg()
1881 1884 nodes = map(bin, nodes.split(" "))
1882 1885 r = []
1883 1886 for b in repo.branches(nodes):
1884 1887 r.append(" ".join(map(hex, b)) + "\n")
1885 1888 respond("".join(r))
1886 1889 elif cmd == "between":
1887 1890 arg, pairs = getarg()
1888 1891 pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
1889 1892 r = []
1890 1893 for b in repo.between(pairs):
1891 1894 r.append(" ".join(map(hex, b)) + "\n")
1892 1895 respond("".join(r))
1893 1896 elif cmd == "changegroup":
1894 1897 nodes = []
1895 1898 arg, roots = getarg()
1896 1899 nodes = map(bin, roots.split(" "))
1897 1900
1898 1901 cg = repo.changegroup(nodes)
1899 1902 while 1:
1900 1903 d = cg.read(4096)
1901 1904 if not d:
1902 1905 break
1903 1906 fout.write(d)
1904 1907
1905 1908 fout.flush()
1906 1909
1907 1910 elif cmd == "addchangegroup":
1908 1911 if not lock:
1909 1912 respond("not locked")
1910 1913 continue
1911 1914 respond("")
1912 1915
1913 1916 r = repo.addchangegroup(fin)
1914 1917 respond("")
1915 1918
1916 1919 optlist = "name templates style address port ipv6 accesslog errorlog"
1917 1920 for o in optlist.split():
1918 1921 if opts[o]:
1919 1922 ui.setconfig("web", o, opts[o])
1920 1923
1921 1924 try:
1922 1925 httpd = hgweb.create_server(repo)
1923 1926 except socket.error, inst:
1924 1927 raise util.Abort('cannot start server: ' + inst.args[1])
1925 1928
1926 1929 if ui.verbose:
1927 1930 addr, port = httpd.socket.getsockname()
1928 1931 if addr == '0.0.0.0':
1929 1932 addr = socket.gethostname()
1930 1933 else:
1931 1934 try:
1932 1935 addr = socket.gethostbyaddr(addr)[0]
1933 1936 except socket.error:
1934 1937 pass
1935 1938 if port != 80:
1936 1939 ui.status(_('listening at http://%s:%d/\n') % (addr, port))
1937 1940 else:
1938 1941 ui.status(_('listening at http://%s/\n') % addr)
1939 1942 httpd.serve_forever()
1940 1943
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show changed files in the working directory. If no names are
    given, all files are shown. Otherwise, only files matching the
    given names are shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    ? = not tracked
    """

    cwd = repo.getcwd()
    files, matchfn, anypats = matchpats(repo, cwd, pats, opts)
    # repo.changes yields four lists; convert each path to be cwd-relative
    c, a, d, u = [[util.pathto(cwd, x) for x in n]
                  for n in repo.changes(files=files, match=matchfn)]

    changetypes = [(_('modified'), 'M', c),
                   (_('added'), 'A', a),
                   (_('removed'), 'R', d),
                   (_('unknown'), '?', u)]

    # -0 terminates entries with NUL instead of newline
    end = opts['print0'] and '\0' or '\n'

    # restrict output to the requested status classes, defaulting to all
    selected = [ct for ct in changetypes if opts[ct[0]]] or changetypes
    for opt, char, changes in selected:
        if opts['no_status']:
            fmt = "%%s%s" % end
        else:
            fmt = "%s %%s%s" % (char, end)
        for f in changes:
            ui.write(fmt % f)
1976 1979
1977 1980 def tag(ui, repo, name, rev=None, **opts):
1978 1981 """add a tag for the current tip or a given revision
1979 1982
1980 1983 Name a particular revision using <name>.
1981 1984
1982 1985 Tags are used to name particular revisions of the repository and are
1983 1986 very useful to compare different revision, to go back to significant
1984 1987 earlier versions or to mark branch points as releases, etc.
1985 1988
1986 1989 If no revision is given, the tip is used.
1987 1990
1988 1991 To facilitate version control, distribution, and merging of tags,
1989 1992 they are stored as a file named ".hgtags" which is managed
1990 1993 similarly to other project files and can be hand-edited if
1991 1994 necessary.
1992 1995 """
1993 1996 if name == "tip":
1994 1997 raise util.Abort(_("the name 'tip' is reserved"))
1995 1998 if 'rev' in opts:
1996 1999 rev = opts['rev']
1997 2000 if rev:
1998 2001 r = hex(repo.lookup(rev))
1999 2002 else:
2000 2003 r = hex(repo.changelog.tip())
2001 2004
2002 2005 if name.find(revrangesep) >= 0:
2003 2006 raise util.Abort(_("'%s' cannot be used in a tag name") % revrangesep)
2004 2007
2005 2008 if opts['local']:
2006 2009 repo.opener("localtags", "a").write("%s %s\n" % (r, name))
2007 2010 return
2008 2011
2009 2012 (c, a, d, u) = repo.changes()
2010 2013 for x in (c, a, d, u):
2011 2014 if ".hgtags" in x:
2012 2015 raise util.Abort(_("working copy of .hgtags is changed "
2013 2016 "(please commit .hgtags manually)"))
2014 2017
2015 2018 repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
2016 2019 if repo.dirstate.state(".hgtags") == '?':
2017 2020 repo.add([".hgtags"])
2018 2021
2019 2022 message = (opts['message'] or
2020 2023 _("Added tag %s for changeset %s") % (name, r))
2021 2024 try:
2022 2025 repo.commit([".hgtags"], message, opts['user'], opts['date'])
2023 2026 except ValueError, inst:
2024 2027 raise util.Abort(str(inst))
2025 2028
def tags(ui, repo):
    """list repository tags

    List the repository tags.

    This lists both regular and local tags.
    """

    taglist = repo.tagslist()
    taglist.reverse()
    for t, n in taglist:
        # a tag may point at a node the changelog does not know about
        try:
            r = "%5d:%s" % (repo.changelog.rev(n), hex(n))
        except KeyError:
            r = " ?:?"
        ui.write("%-30s %s\n" % (t, r))
2042 2045
def tip(ui, repo):
    """show the tip revision

    Show the tip revision.
    """
    show_changeset(ui, repo, changenode=repo.changelog.tip())
2050 2053
def unbundle(ui, repo, fname):
    """apply a changegroup file

    Apply a compressed changegroup file generated by the bundle
    command.
    """
    f = urllib.urlopen(fname)

    # bundle files start with a fixed magic header
    if f.read(4) != "HG10":
        raise util.Abort(_("%s: not a Mercurial bundle file") % fname)

    def bzgenerator(chunks):
        # lazily decompress the bz2-compressed remainder of the stream
        decomp = bz2.BZ2Decompressor()
        for chunk in chunks:
            yield decomp.decompress(chunk)

    bzgen = bzgenerator(util.filechunkiter(f, 4096))
    repo.addchangegroup(util.chunkbuffer(bzgen))
2069 2072
def undo(ui, repo):
    """undo the last commit or pull

    Roll back the last pull or commit transaction on the
    repository, restoring the project to its earlier state.

    This command should be used with care. There is only one level of
    undo and there is no redo.

    This command is not intended for use on public repositories. Once
    a change is visible for pull by other users, undoing it locally is
    ineffective.
    """
    # All of the actual rollback work happens in the repository layer.
    repo.undo()
2084 2087
def update(ui, repo, node=None, merge=False, clean=False, branch=None):
    """update or merge working directory

    Update the working directory to the specified revision.

    If there are no outstanding changes in the working directory and
    there is a linear relationship between the current version and the
    requested version, the result is the requested version.

    Otherwise the result is a merge between the contents of the
    current working directory and the requested version. Files that
    changed between either parent are marked as changed for the next
    commit and a commit must be performed before any further updates
    are allowed.

    By default, update will refuse to run if doing so would require
    merging or discarding local changes.
    """
    if branch:
        # resolve the branch name to the head(s) carrying it
        br = repo.branchlookup(branch=branch)
        found = [x for x in br if branch in br[x]]
        if len(found) > 1:
            ui.warn(_("Found multiple heads for %s\n") % branch)
            for x in found:
                show_changeset(ui, repo, changenode=x, brinfo=br)
            return 1
        if len(found) == 1:
            node = found[0]
            ui.warn(_("Using head %s for branch %s\n") % (short(node), branch))
        else:
            ui.warn(_("branch %s not found\n") % (branch))
            return 1
    else:
        node = node and repo.lookup(node) or repo.changelog.tip()
    return repo.update(node, allow=merge, force=clean)
2123 2126
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # All of the heavy lifting lives in the repository object.
    return repo.verify()
2135 2138
# Command options and aliases are listed here, alphabetically
#
# Each entry maps "name|alias..." (a leading ^ marks the command for the
# short help list) to (function, option table, synopsis).  The rendered
# diff had left both the pre- and post-change option lines for "heads" in
# the text; only the new variant with -r/--rev is kept here.

table = {
    "^add":
        (add,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         "hg add [OPTION]... [FILE]..."),
    "addremove":
        (addremove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         "hg addremove [OPTION]... [FILE]..."),
    "^annotate":
        (annotate,
         [('r', 'rev', '', _('annotate the specified revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('u', 'user', None, _('list the author')),
          ('d', 'date', None, _('list the date')),
          ('n', 'number', None, _('list the revision number (default)')),
          ('c', 'changeset', None, _('list the changeset')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg annotate [OPTION]... FILE...')),
    "bundle":
        (bundle,
         [],
         _('hg bundle FILE DEST')),
    "cat":
        (cat,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('o', 'output', "", _('print output to file with formatted name')),
          ('r', 'rev', '', _('print the given revision'))],
         _('hg cat [OPTION]... FILE...')),
    "^clone":
        (clone,
         [('U', 'noupdate', None, _('do not update the new working directory')),
          ('e', 'ssh', "", _('specify ssh command to use')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
          ('r', 'rev', [], _('a changeset you would like to have after cloning')),
          ('', 'remotecmd', "", _('specify hg command to run on the remote side'))],
         _('hg clone [OPTION]... SOURCE [DEST]')),
    "^commit|ci":
        (commit,
         [('A', 'addremove', None, _('run addremove during commit')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('m', 'message', "", _('use <text> as commit message')),
          ('l', 'logfile', "", _('read the commit message from <file>')),
          ('d', 'date', "", _('record datecode as commit date')),
          ('u', 'user', "", _('record user as commiter'))],
         _('hg commit [OPTION]... [FILE]...')),
    "copy|cp":
        (copy,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('A', 'after', None, _('record a copy that has already occurred')),
          ('f', 'force', None, _('forcibly copy over an existing managed file'))],
         _('hg copy [OPTION]... [SOURCE]... DEST')),
    "debugancestor": (debugancestor, [], _('debugancestor INDEX REV1 REV2')),
    "debugcheckstate": (debugcheckstate, [], _('debugcheckstate')),
    "debugconfig": (debugconfig, [], _('debugconfig')),
    "debugsetparents": (debugsetparents, [], _('debugsetparents REV1 [REV2]')),
    "debugstate": (debugstate, [], _('debugstate')),
    "debugdata": (debugdata, [], _('debugdata FILE REV')),
    "debugindex": (debugindex, [], _('debugindex FILE')),
    "debugindexdot": (debugindexdot, [], _('debugindexdot FILE')),
    "debugrename": (debugrename, [], _('debugrename FILE [REV]')),
    "debugwalk":
        (debugwalk,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('debugwalk [OPTION]... [FILE]...')),
    "^diff":
        (diff,
         [('r', 'rev', [], _('revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg diff [-a] [-I] [-X] [-r REV1 [-r REV2]] [FILE]...')),
    "^export":
        (export,
         [('o', 'output', "", _('print output to file with formatted name')),
          ('a', 'text', None, _('treat all files as text'))],
         "hg export [-a] [-o OUTFILE] REV..."),
    "forget":
        (forget,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         "hg forget [OPTION]... FILE..."),
    "grep":
        (grep,
         [('0', 'print0', None, _('end fields with NUL')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('', 'all', None, _('print all revisions that match')),
          ('i', 'ignore-case', None, _('ignore case when matching')),
          ('l', 'files-with-matches', None, _('print only filenames and revs that match')),
          ('n', 'line-number', None, _('print matching line numbers')),
          ('r', 'rev', [], _('search in given revision range')),
          ('u', 'user', None, _('print user who committed change'))],
         "hg grep [OPTION]... PATTERN [FILE]..."),
    "heads":
        (heads,
         [('b', 'branches', None, _('find branch info')),
          ('r', 'rev', None, _('show only heads descendants from rev'))],
         _('hg heads [-b] [-r <rev>]')),
    "help": (help_, [], _('hg help [COMMAND]')),
    "identify|id": (identify, [], _('hg identify')),
    "import|patch":
        (import_,
         [('p', 'strip', 1, _('directory strip option for patch. This has the same\n') +
                            _('meaning as the corresponding patch option')),
          ('f', 'force', None, _('skip check for outstanding uncommitted changes')),
          ('b', 'base', "", _('base path'))],
         "hg import [-f] [-p NUM] [-b BASE] PATCH..."),
    "incoming|in":
        (incoming,
         [('M', 'no-merges', None, _("do not show merges")),
          ('p', 'patch', None, _('show patch')),
          ('n', 'newest-first', None, _('show newest record first'))],
         _('hg incoming [-p] [-n] [-M] [SOURCE]')),
    "^init": (init, [], _('hg init [DEST]')),
    "locate":
        (locate,
         [('r', 'rev', '', _('search the repository as it stood at rev')),
          ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
          ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg locate [OPTION]... [PATTERN]...')),
    "^log|history":
        (log,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('b', 'branch', None, _('show branches')),
          ('k', 'keyword', [], _('search for a keyword')),
          ('r', 'rev', [], _('show the specified revision or range')),
          ('M', 'no-merges', None, _("do not show merges")),
          ('m', 'only-merges', None, _("show only merges")),
          ('p', 'patch', None, _('show patch'))],
         _('hg log [-I] [-X] [-r REV]... [-p] [FILE]')),
    "manifest": (manifest, [], _('hg manifest [REV]')),
    "outgoing|out":
        (outgoing,
         [('M', 'no-merges', None, _("do not show merges")),
          ('p', 'patch', None, _('show patch')),
          ('n', 'newest-first', None, _('show newest record first'))],
         _('hg outgoing [-p] [-n] [-M] [DEST]')),
    "^parents": (parents, [], _('hg parents [REV]')),
    "paths": (paths, [], _('hg paths [NAME]')),
    "^pull":
        (pull,
         [('u', 'update', None, _('update the working directory to tip after pull')),
          ('e', 'ssh', "", _('specify ssh command to use')),
          ('r', 'rev', [], _('a specific revision you would like to pull')),
          ('', 'remotecmd', "", _('specify hg command to run on the remote side'))],
         _('hg pull [-u] [-e FILE] [-r rev] [--remotecmd FILE] [SOURCE]')),
    "^push":
        (push,
         [('f', 'force', None, _('force push')),
          ('e', 'ssh', "", _('specify ssh command to use')),
          ('', 'remotecmd', "", _('specify hg command to run on the remote side'))],
         _('hg push [-f] [-e FILE] [--remotecmd FILE] [DEST]')),
    "rawcommit":
        (rawcommit,
         [('p', 'parent', [], _('parent')),
          ('d', 'date', "", _('date code')),
          ('u', 'user', "", _('user')),
          ('F', 'files', "", _('file list')),
          ('m', 'message', "", _('commit message')),
          ('l', 'logfile', "", _('commit message file'))],
         _('hg rawcommit [OPTION]... [FILE]...')),
    "recover": (recover, [], _("hg recover")),
    "^remove|rm":
        (remove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _("hg remove [OPTION]... FILE...")),
    "rename|mv":
        (rename,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('A', 'after', None, _('record a rename that has already occurred')),
          ('f', 'force', None, _('forcibly copy over an existing managed file'))],
         _('hg rename [OPTION]... [SOURCE]... DEST')),
    "^revert":
        (revert,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ("r", "rev", "", _("revision to revert to"))],
         _("hg revert [-n] [-r REV] [NAME]...")),
    "root": (root, [], _("hg root")),
    "^serve":
        (serve,
         [('A', 'accesslog', '', _('name of access log file to write to')),
          ('E', 'errorlog', '', _('name of error log file to write to')),
          ('p', 'port', 0, _('port to use (default: 8000)')),
          ('a', 'address', '', _('address to use')),
          ('n', 'name', "", _('name to show in web pages (default: working dir)')),
          ('', 'stdio', None, _('for remote clients')),
          ('t', 'templates', "", _('web templates to use')),
          ('', 'style', "", _('template style to use')),
          ('6', 'ipv6', None, _('use IPv6 in addition to IPv4'))],
         _("hg serve [OPTION]...")),
    "^status|st":
        (status,
         [('m', 'modified', None, _('show only modified files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files')),
          ('u', 'unknown', None, _('show only unknown (not tracked) files')),
          ('n', 'no-status', None, _('hide status prefix')),
          ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _("hg status [OPTION]... [FILE]...")),
    "tag":
        (tag,
         [('l', 'local', None, _('make the tag local')),
          ('m', 'message', "", _('message for tag commit log entry')),
          ('d', 'date', "", _('record datecode as commit date')),
          ('u', 'user', "", _('record user as commiter')),
          ('r', 'rev', "", _('revision to tag'))],
         _('hg tag [OPTION]... NAME [REV]')),
    "tags": (tags, [], _('hg tags')),
    "tip": (tip, [], _('hg tip')),
    "unbundle":
        (unbundle,
         [],
         _('hg unbundle FILE')),
    "undo": (undo, [], _('hg undo')),
    "^update|up|checkout|co":
        (update,
         [('b', 'branch', "", _('checkout the head of a specific branch')),
          ('m', 'merge', None, _('allow merging of branches')),
          ('C', 'clean', None, _('overwrite locally modified files'))],
         _('hg update [-b TAG] [-m] [-C] [REV]')),
    "verify": (verify, [], _('hg verify')),
    "version": (show_version, [], _('hg version')),
}
2371 2375
# options understood by every command; merged into each command's own
# option table by parse() below
globalopts = [
    ('R', 'repository', "", _("repository root directory")),
    ('', 'cwd', '', _("change working directory")),
    ('y', 'noninteractive', None,
     _("do not prompt, assume 'yes' for any required answers")),
    ('q', 'quiet', None, _("suppress output")),
    ('v', 'verbose', None, _("enable additional output")),
    ('', 'debug', None, _("enable debugging output")),
    ('', 'debugger', None, _("start debugger")),
    ('', 'traceback', None, _("print traceback on exception")),
    ('', 'time', None, _("time how long the command takes")),
    ('', 'profile', None, _("print command execution profile")),
    ('', 'version', None, _("output version information and exit")),
    ('h', 'help', None, _("display help and exit")),
]
2386 2390
# space-separated list of commands that run without a local repository
norepo = ("clone init version help debugancestor debugconfig debugdata"
          " debugindex debugindexdot paths")
2389 2393
def find(cmd):
    """Return (aliases, command table entry) for command string."""
    prefix_match = None
    for entry in table.keys():
        aliases = entry.lstrip("^").split("|")
        # an exact alias match for this entry wins immediately
        if cmd in aliases:
            return aliases, table[entry]
        # otherwise remember a unique prefix match; two distinct entries
        # matching the same prefix is an ambiguity
        for alias in aliases:
            if alias.startswith(cmd):
                if prefix_match:
                    raise AmbiguousCommand(cmd)
                prefix_match = aliases, table[entry]
                break
    if prefix_match:
        return prefix_match

    raise UnknownCommand(cmd)
2408 2412
class SignalInterrupt(Exception):
    """Raised when the process receives SIGTERM or SIGHUP."""
2411 2415
def catchterm(*args):
    """Signal handler: turn SIGTERM/SIGHUP into a SignalInterrupt."""
    raise SignalInterrupt
2414 2418
def run():
    """Command-line entry point: dispatch argv and exit with its status."""
    sys.exit(dispatch(sys.argv[1:]))
2417 2421
class ParseError(Exception):
    """Raised on errors in parsing the command line."""
2420 2424
2421 2425 def parse(ui, args):
2422 2426 options = {}
2423 2427 cmdoptions = {}
2424 2428
2425 2429 try:
2426 2430 args = fancyopts.fancyopts(args, globalopts, options)
2427 2431 except fancyopts.getopt.GetoptError, inst:
2428 2432 raise ParseError(None, inst)
2429 2433
2430 2434 if args:
2431 2435 cmd, args = args[0], args[1:]
2432 2436 defaults = ui.config("defaults", cmd)
2433 2437 if defaults:
2434 2438 args = defaults.split() + args
2435 2439
2436 2440 aliases, i = find(cmd)
2437 2441 cmd = aliases[0]
2438 2442 c = list(i[1])
2439 2443 else:
2440 2444 cmd = None
2441 2445 c = []
2442 2446
2443 2447 # combine global options into local
2444 2448 for o in globalopts:
2445 2449 c.append((o[0], o[1], options[o[1]], o[3]))
2446 2450
2447 2451 try:
2448 2452 args = fancyopts.fancyopts(args, c, cmdoptions)
2449 2453 except fancyopts.getopt.GetoptError, inst:
2450 2454 raise ParseError(cmd, inst)
2451 2455
2452 2456 # separate global options back out
2453 2457 for o in globalopts:
2454 2458 n = o[1]
2455 2459 options[n] = cmdoptions[n]
2456 2460 del cmdoptions[n]
2457 2461
2458 2462 return (cmd, cmd and i[0] or None, args, options, cmdoptions)
2459 2463
2460 2464 def dispatch(args):
2461 2465 signal.signal(signal.SIGTERM, catchterm)
2462 2466 try:
2463 2467 signal.signal(signal.SIGHUP, catchterm)
2464 2468 except AttributeError:
2465 2469 pass
2466 2470
2467 2471 try:
2468 2472 u = ui.ui()
2469 2473 except util.Abort, inst:
2470 2474 sys.stderr.write(_("abort: %s\n") % inst)
2471 2475 sys.exit(1)
2472 2476
2473 2477 external = []
2474 2478 for x in u.extensions():
2475 2479 def on_exception(exc, inst):
2476 2480 u.warn(_("*** failed to import extension %s\n") % x[1])
2477 2481 u.warn("%s\n" % inst)
2478 2482 if "--traceback" in sys.argv[1:]:
2479 2483 traceback.print_exc()
2480 2484 if x[1]:
2481 2485 try:
2482 2486 mod = imp.load_source(x[0], x[1])
2483 2487 except Exception, inst:
2484 2488 on_exception(Exception, inst)
2485 2489 continue
2486 2490 else:
2487 2491 def importh(name):
2488 2492 mod = __import__(name)
2489 2493 components = name.split('.')
2490 2494 for comp in components[1:]:
2491 2495 mod = getattr(mod, comp)
2492 2496 return mod
2493 2497 try:
2494 2498 mod = importh(x[0])
2495 2499 except Exception, inst:
2496 2500 on_exception(Exception, inst)
2497 2501 continue
2498 2502
2499 2503 external.append(mod)
2500 2504 for x in external:
2501 2505 cmdtable = getattr(x, 'cmdtable', {})
2502 2506 for t in cmdtable:
2503 2507 if t in table:
2504 2508 u.warn(_("module %s overrides %s\n") % (x.__name__, t))
2505 2509 table.update(cmdtable)
2506 2510
2507 2511 try:
2508 2512 cmd, func, args, options, cmdoptions = parse(u, args)
2509 2513 except ParseError, inst:
2510 2514 if inst.args[0]:
2511 2515 u.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
2512 2516 help_(u, inst.args[0])
2513 2517 else:
2514 2518 u.warn(_("hg: %s\n") % inst.args[1])
2515 2519 help_(u, 'shortlist')
2516 2520 sys.exit(-1)
2517 2521 except AmbiguousCommand, inst:
2518 2522 u.warn(_("hg: command '%s' is ambiguous.\n") % inst.args[0])
2519 2523 sys.exit(1)
2520 2524 except UnknownCommand, inst:
2521 2525 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
2522 2526 help_(u, 'shortlist')
2523 2527 sys.exit(1)
2524 2528
2525 2529 if options["time"]:
2526 2530 def get_times():
2527 2531 t = os.times()
2528 2532 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
2529 2533 t = (t[0], t[1], t[2], t[3], time.clock())
2530 2534 return t
2531 2535 s = get_times()
2532 2536 def print_time():
2533 2537 t = get_times()
2534 2538 u.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
2535 2539 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
2536 2540 atexit.register(print_time)
2537 2541
2538 2542 u.updateopts(options["verbose"], options["debug"], options["quiet"],
2539 2543 not options["noninteractive"])
2540 2544
2541 2545 # enter the debugger before command execution
2542 2546 if options['debugger']:
2543 2547 pdb.set_trace()
2544 2548
2545 2549 try:
2546 2550 try:
2547 2551 if options['help']:
2548 2552 help_(u, cmd, options['version'])
2549 2553 sys.exit(0)
2550 2554 elif options['version']:
2551 2555 show_version(u)
2552 2556 sys.exit(0)
2553 2557 elif not cmd:
2554 2558 help_(u, 'shortlist')
2555 2559 sys.exit(0)
2556 2560
2557 2561 if options['cwd']:
2558 2562 try:
2559 2563 os.chdir(options['cwd'])
2560 2564 except OSError, inst:
2561 2565 raise util.Abort('%s: %s' %
2562 2566 (options['cwd'], inst.strerror))
2563 2567
2564 2568 if cmd not in norepo.split():
2565 2569 path = options["repository"] or ""
2566 2570 repo = hg.repository(ui=u, path=path)
2567 2571 for x in external:
2568 2572 if hasattr(x, 'reposetup'): x.reposetup(u, repo)
2569 2573 d = lambda: func(u, repo, *args, **cmdoptions)
2570 2574 else:
2571 2575 d = lambda: func(u, *args, **cmdoptions)
2572 2576
2573 2577 if options['profile']:
2574 2578 import hotshot, hotshot.stats
2575 2579 prof = hotshot.Profile("hg.prof")
2576 2580 r = prof.runcall(d)
2577 2581 prof.close()
2578 2582 stats = hotshot.stats.load("hg.prof")
2579 2583 stats.strip_dirs()
2580 2584 stats.sort_stats('time', 'calls')
2581 2585 stats.print_stats(40)
2582 2586 return r
2583 2587 else:
2584 2588 return d()
2585 2589 except:
2586 2590 # enter the debugger when we hit an exception
2587 2591 if options['debugger']:
2588 2592 pdb.post_mortem(sys.exc_info()[2])
2589 2593 if options['traceback']:
2590 2594 traceback.print_exc()
2591 2595 raise
2592 2596 except hg.RepoError, inst:
2593 2597 u.warn(_("abort: "), inst, "!\n")
2594 2598 except revlog.RevlogError, inst:
2595 2599 u.warn(_("abort: "), inst, "!\n")
2596 2600 except SignalInterrupt:
2597 2601 u.warn(_("killed!\n"))
2598 2602 except KeyboardInterrupt:
2599 2603 try:
2600 2604 u.warn(_("interrupted!\n"))
2601 2605 except IOError, inst:
2602 2606 if inst.errno == errno.EPIPE:
2603 2607 if u.debugflag:
2604 2608 u.warn(_("\nbroken pipe\n"))
2605 2609 else:
2606 2610 raise
2607 2611 except IOError, inst:
2608 2612 if hasattr(inst, "code"):
2609 2613 u.warn(_("abort: %s\n") % inst)
2610 2614 elif hasattr(inst, "reason"):
2611 2615 u.warn(_("abort: error: %s\n") % inst.reason[1])
2612 2616 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
2613 2617 if u.debugflag:
2614 2618 u.warn(_("broken pipe\n"))
2615 2619 elif getattr(inst, "strerror", None):
2616 2620 if getattr(inst, "filename", None):
2617 2621 u.warn(_("abort: %s - %s\n") % (inst.strerror, inst.filename))
2618 2622 else:
2619 2623 u.warn(_("abort: %s\n") % inst.strerror)
2620 2624 else:
2621 2625 raise
2622 2626 except OSError, inst:
2623 2627 if hasattr(inst, "filename"):
2624 2628 u.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
2625 2629 else:
2626 2630 u.warn(_("abort: %s\n") % inst.strerror)
2627 2631 except util.Abort, inst:
2628 2632 u.warn(_('abort: '), inst.args[0] % inst.args[1:], '\n')
2629 2633 sys.exit(1)
2630 2634 except TypeError, inst:
2631 2635 # was this an argument error?
2632 2636 tb = traceback.extract_tb(sys.exc_info()[2])
2633 2637 if len(tb) > 2: # no
2634 2638 raise
2635 2639 u.debug(inst, "\n")
2636 2640 u.warn(_("%s: invalid arguments\n") % cmd)
2637 2641 help_(u, cmd)
2638 2642 except AmbiguousCommand, inst:
2639 2643 u.warn(_("hg: command '%s' is ambiguous.\n") % inst.args[0])
2640 2644 help_(u, 'shortlist')
2641 2645 except UnknownCommand, inst:
2642 2646 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
2643 2647 help_(u, 'shortlist')
2644 2648 except SystemExit:
2645 2649 # don't catch this in the catch-all below
2646 2650 raise
2647 2651 except:
2648 2652 u.warn(_("** unknown exception encountered, details follow\n"))
2649 2653 u.warn(_("** report bug details to mercurial@selenic.com\n"))
2650 2654 raise
2651 2655
2652 2656 sys.exit(-1)
@@ -1,1776 +1,1780 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or create) the repository at path.

        If path is None, walk upward from the current working directory
        until a directory containing .hg is found.  Raises repo.RepoError
        if no repository can be located (or if it does not exist and
        create is false).
        """
        if not path:
            # search upward for a directory containing .hg
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp: raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None       # lazily built by tags()
        self.nodetagscache = None   # lazily built by nodetags()
        self.encodepats = None      # lazily built by wread()
        self.decodepats = None      # lazily built by wwrite()

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"))
        except IOError: pass
    def hook(self, name, **args):
        """Run all configured [hooks] entries matching name.

        Keyword arguments are exported to the hook's environment as
        upper-cased variables.  Returns True only if every matching hook
        exited with status 0.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            # export the keyword args into the environment, remembering
            # the old values so they can be restored afterwards
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(cmd)
            os.chdir(olddir)

            # restore the previous environment
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                             (name, r))
                return False
            return True

        r = True
        # hook names may be qualified ("name.subname"); match on the
        # part before the first dot
        for hname, cmd in self.ui.configitems("hooks"):
            s = hname.split(".")
            if s[0] == name and cmd:
                r = runhook(hname, cmd) and r
        return r
82 82
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # n is a hex string; store the binary node, or '' if
                # the hex is malformed
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in the repository
                pass

            try:
                # local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' always exists and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
120 120
121 121 def tagslist(self):
122 122 '''return a list of tags ordered by revision'''
123 123 l = []
124 124 for t, n in self.tags().items():
125 125 try:
126 126 r = self.changelog.rev(n)
127 127 except:
128 128 r = -2 # sort to the beginning of the list if unknown
129 129 l.append((r,t,n))
130 130 l.sort()
131 131 return [(t,n) for r,t,n in l]
132 132
133 133 def nodetags(self, node):
134 134 '''return the tags associated with a node'''
135 135 if not self.nodetagscache:
136 136 self.nodetagscache = {}
137 137 for t,n in self.tags().items():
138 138 self.nodetagscache.setdefault(n,[]).append(t)
139 139 return self.nodetagscache.get(node, [])
140 140
    def lookup(self, key):
        """Resolve key (a tag name or changelog identifier) to a binary node.

        Tags take precedence over changelog lookup; raises repo.RepoError
        when neither resolves the key.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError(_("unknown revision '%s'") % key)
149 149
150 150 def dev(self):
151 151 return os.stat(self.path).st_dev
152 152
153 153 def local(self):
154 154 return True
155 155
156 156 def join(self, f):
157 157 return os.path.join(self.path, f)
158 158
159 159 def wjoin(self, f):
160 160 return os.path.join(self.root, f)
161 161
162 162 def file(self, f):
163 163 if f[0] == '/': f = f[1:]
164 164 return filelog.filelog(self.opener, f)
165 165
166 166 def getcwd(self):
167 167 return self.dirstate.getcwd()
168 168
169 169 def wfile(self, f, mode='r'):
170 170 return self.wopener(f, mode)
171 171
172 172 def wread(self, filename):
173 173 if self.encodepats == None:
174 174 l = []
175 175 for pat, cmd in self.ui.configitems("encode"):
176 176 mf = util.matcher("", "/", [pat], [], [])[1]
177 177 l.append((mf, cmd))
178 178 self.encodepats = l
179 179
180 180 data = self.wopener(filename, 'r').read()
181 181
182 182 for mf, cmd in self.encodepats:
183 183 if mf(filename):
184 184 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
185 185 data = util.filter(data, cmd)
186 186 break
187 187
188 188 return data
189 189
190 190 def wwrite(self, filename, data, fd=None):
191 191 if self.decodepats == None:
192 192 l = []
193 193 for pat, cmd in self.ui.configitems("decode"):
194 194 mf = util.matcher("", "/", [pat], [], [])[1]
195 195 l.append((mf, cmd))
196 196 self.decodepats = l
197 197
198 198 for mf, cmd in self.decodepats:
199 199 if mf(filename):
200 200 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
201 201 data = util.filter(data, cmd)
202 202 break
203 203
204 204 if fd:
205 205 return fd.write(data)
206 206 return self.wopener(filename, 'w').write(data)
207 207
    def transaction(self):
        """Start a new transaction, returning the transaction object.

        The current dirstate is snapshotted to journal.dirstate so that
        undo() can restore it; on close, the journal files are renamed
        to undo/undo.dirstate.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on successful close, keep the journal around as undo data
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
223 223
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
233 233
    def undo(self):
        """Roll back the last committed transaction and restore the
        dirstate that was saved alongside it."""
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # re-read the restored dirstate into memory
            self.dirstate.read()
        else:
            self.ui.warn(_("no undo information available\n"))
244 244
    def lock(self, wait=1):
        """Acquire the repository (store) lock.

        Tries a non-blocking acquire first; if the lock is held and wait
        is true, warns and blocks until it is available, otherwise
        re-raises LockHeld.
        """
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
253 253
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        The lock's release callback writes out the dirstate.  If we had
        to wait for another process, the dirstate is re-read since it
        may have been changed while we waited.
        """
        try:
            wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
            # another process may have touched the dirstate while we waited
            self.dirstate.read()
        return wlock
264 264
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files directly, bypassing the normal status
        checks (used by import/debug commands).

        p1/p2 default to the dirstate parents; the dirstate is only
        updated when p1 matches the current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we're committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
336 336
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit changes to the repository.

        If files is given, only those files are committed (they must be
        tracked); otherwise everything reported changed by changes() is.
        Returns the new changeset node, or None if nothing was committed
        (nothing changed, a hook refused, or the commit message editor
        returned an empty message).
        """
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: classify each by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # allow an empty commit when forced or when closing a merge
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        if not self.hook("precommit"):
            return None

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source/revision in the file's metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: build a template and run the editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
462 462
463 463 def walk(self, node=None, files=[], match=util.always):
464 464 if node:
465 465 for fn in self.manifest.read(self.changelog.read(node)[0]):
466 466 if match(fn): yield 'm', fn
467 467 else:
468 468 for src, fn in self.dirstate.walk(files, match):
469 469 yield src, fn
470 470
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) file lists between
        two revisions, or between a revision and the working directory
        when node2 is None (and against the dirstate parent when node1
        is also None).  All four lists are sorted.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working-dir contents of fn against its manifest rev
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            try:
                wlock = self.wlock(wait=0)
            except lock.LockHeld:
                # can't write the dirstate; proceed read-only
                wlock = None
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)
                        elif wlock is not None:
                            # file is clean: refresh its dirstate entry
                            self.dirstate.update([f], "n")

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 was deleted
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
547 547
548 548 def add(self, list):
549 549 wlock = self.wlock()
550 550 for f in list:
551 551 p = self.wjoin(f)
552 552 if not os.path.exists(p):
553 553 self.ui.warn(_("%s does not exist!\n") % f)
554 554 elif not os.path.isfile(p):
555 555 self.ui.warn(_("%s not added: only files supported currently\n") % f)
556 556 elif self.dirstate.state(f) in 'an':
557 557 self.ui.warn(_("%s already tracked!\n") % f)
558 558 else:
559 559 self.dirstate.update([f], "a")
560 560
561 561 def forget(self, list):
562 562 wlock = self.wlock()
563 563 for f in list:
564 564 if self.dirstate.state(f) not in 'ai':
565 565 self.ui.warn(_("%s not added!\n") % f)
566 566 else:
567 567 self.dirstate.forget([f])
568 568
    def remove(self, list, unlink=False):
        """Schedule the given files for removal at the next commit.

        With unlink=True the files are also deleted from the working
        directory first (missing files are ignored).
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is an error
                    if inst.errno != errno.ENOENT: raise
        wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # added but never committed: just forget it
                self.ui.warn(_("%s never committed!\n") % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
588 588
589 589 def undelete(self, list):
590 590 p = self.dirstate.parents()[0]
591 591 mn = self.changelog.read(p)[0]
592 592 mf = self.manifest.readflags(mn)
593 593 m = self.manifest.read(mn)
594 594 wlock = self.wlock()
595 595 for f in list:
596 596 if self.dirstate.state(f) not in "r":
597 597 self.ui.warn("%s not removed!\n" % f)
598 598 else:
599 599 t = self.file(f).read(m[f])
600 600 self.wwrite(f, t)
601 601 util.set_exec(self.wjoin(f), mf[f])
602 602 self.dirstate.update([f], "n")
603 603
604 604 def copy(self, source, dest):
605 605 p = self.wjoin(dest)
606 606 if not os.path.exists(p):
607 607 self.ui.warn(_("%s does not exist!\n") % dest)
608 608 elif not os.path.isfile(p):
609 609 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
610 610 else:
611 611 wlock = self.wlock()
612 612 if self.dirstate.state(dest) == '?':
613 613 self.dirstate.update([dest], "a")
614 614 self.dirstate.copy(source, dest)
615 615
616 def heads(self):
617 return self.changelog.heads()
616 def heads(self, start=nullid):
617 heads = self.changelog.heads(start)
618 # sort the output in rev descending order
619 heads = [(-self.changelog.rev(h), h) for h in heads]
620 heads.sort()
621 return [n for (r, n) in heads]
618 622
619 623 # branchlookup returns a dict giving a list of branches for
620 624 # each head. A branch is defined as the tag of a node or
621 625 # the branch of the node's parents. If a node has multiple
622 626 # branch tags, tags are eliminated if they are visible from other
623 627 # branch tags.
624 628 #
625 629 # So, for this graph: a->b->c->d->e
626 630 # \ /
627 631 # aa -----/
628 632 # a has tag 2.6.12
629 633 # d has tag 2.6.13
630 634 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
631 635 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
632 636 # from the list.
633 637 #
634 638 # It is possible that more than one head will have the same branch tag.
635 639 # callers need to check the result for multiple heads under the same
636 640 # branch tag if that is a problem for them (ie checkout of a specific
637 641 # branch).
638 642 #
639 643 # passing in a specific branch will limit the depth of the search
640 644 # through the parents. It won't limit the branches returned in the
641 645 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags
        (see the comment block above for the full semantics)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending
                        continue
                seen[n] = 1
                # second parents are queued for a later traversal pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of branch-tag nodes reachable from node (memoized)
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
724 728
725 729 def branches(self, nodes):
726 730 if not nodes: nodes = [self.changelog.tip()]
727 731 b = []
728 732 for n in nodes:
729 733 t = n
730 734 while n:
731 735 p = self.changelog.parents(n)
732 736 if p[1] != nullid or p[0] == nullid:
733 737 b.append((t, n, p[0], p[1]))
734 738 break
735 739 n = p[0]
736 740 return b
737 741
738 742 def between(self, pairs):
739 743 r = []
740 744
741 745 for top, bottom in pairs:
742 746 n, l, i = top, [], 0
743 747 f = 1
744 748
745 749 while n != bottom:
746 750 p = self.changelog.parents(n)[0]
747 751 if i == f:
748 752 l.append(n)
749 753 f = f * 2
750 754 n = p
751 755 i += 1
752 756
753 757 r.append(l)
754 758
755 759 return r
756 760
757 761 def findincoming(self, remote, base=None, heads=None):
758 762 m = self.changelog.nodemap
759 763 search = []
760 764 fetch = {}
761 765 seen = {}
762 766 seenbranch = {}
763 767 if base == None:
764 768 base = {}
765 769
766 770 # assume we're closer to the tip than the root
767 771 # and start by examining the heads
768 772 self.ui.status(_("searching for changes\n"))
769 773
770 774 if not heads:
771 775 heads = remote.heads()
772 776
773 777 unknown = []
774 778 for h in heads:
775 779 if h not in m:
776 780 unknown.append(h)
777 781 else:
778 782 base[h] = 1
779 783
780 784 if not unknown:
781 785 return None
782 786
783 787 rep = {}
784 788 reqcnt = 0
785 789
786 790 # search through remote branches
787 791 # a 'branch' here is a linear segment of history, with four parts:
788 792 # head, root, first parent, second parent
789 793 # (a branch always has two parents (or none) by definition)
790 794 unknown = remote.branches(unknown)
791 795 while unknown:
792 796 r = []
793 797 while unknown:
794 798 n = unknown.pop(0)
795 799 if n[0] in seen:
796 800 continue
797 801
798 802 self.ui.debug(_("examining %s:%s\n") % (short(n[0]), short(n[1])))
799 803 if n[0] == nullid:
800 804 break
801 805 if n in seenbranch:
802 806 self.ui.debug(_("branch already found\n"))
803 807 continue
804 808 if n[1] and n[1] in m: # do we know the base?
805 809 self.ui.debug(_("found incomplete branch %s:%s\n")
806 810 % (short(n[0]), short(n[1])))
807 811 search.append(n) # schedule branch range for scanning
808 812 seenbranch[n] = 1
809 813 else:
810 814 if n[1] not in seen and n[1] not in fetch:
811 815 if n[2] in m and n[3] in m:
812 816 self.ui.debug(_("found new changeset %s\n") %
813 817 short(n[1]))
814 818 fetch[n[1]] = 1 # earliest unknown
815 819 base[n[2]] = 1 # latest known
816 820 continue
817 821
818 822 for a in n[2:4]:
819 823 if a not in rep:
820 824 r.append(a)
821 825 rep[a] = 1
822 826
823 827 seen[n[0]] = 1
824 828
825 829 if r:
826 830 reqcnt += 1
827 831 self.ui.debug(_("request %d: %s\n") %
828 832 (reqcnt, " ".join(map(short, r))))
829 833 for p in range(0, len(r), 10):
830 834 for b in remote.branches(r[p:p+10]):
831 835 self.ui.debug(_("received %s:%s\n") %
832 836 (short(b[0]), short(b[1])))
833 837 if b[0] in m:
834 838 self.ui.debug(_("found base node %s\n") % short(b[0]))
835 839 base[b[0]] = 1
836 840 elif b[0] not in seen:
837 841 unknown.append(b)
838 842
839 843 # do binary search on the branches we found
840 844 while search:
841 845 n = search.pop(0)
842 846 reqcnt += 1
843 847 l = remote.between([(n[0], n[1])])[0]
844 848 l.append(n[1])
845 849 p = n[0]
846 850 f = 1
847 851 for i in l:
848 852 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
849 853 if i in m:
850 854 if f <= 2:
851 855 self.ui.debug(_("found new branch changeset %s\n") %
852 856 short(p))
853 857 fetch[p] = 1
854 858 base[i] = 1
855 859 else:
856 860 self.ui.debug(_("narrowed branch search to %s:%s\n")
857 861 % (short(p), short(i)))
858 862 search.append((p, i))
859 863 break
860 864 p, f = i, f * 2
861 865
862 866 # sanity check our fetch list
863 867 for f in fetch.keys():
864 868 if f in m:
865 869 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
866 870
867 871 if base.keys() == [nullid]:
868 872 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
869 873
870 874 self.ui.note(_("found new changesets starting at ") +
871 875 " ".join([short(f) for f in fetch]) + "\n")
872 876
873 877 self.ui.debug(_("%d total queries\n") % reqcnt)
874 878
875 879 return fetch.keys()
876 880
877 881 def findoutgoing(self, remote, base=None, heads=None):
878 882 if base == None:
879 883 base = {}
880 884 self.findincoming(remote, base, heads)
881 885
882 886 self.ui.debug(_("common changesets up to ")
883 887 + " ".join(map(short, base.keys())) + "\n")
884 888
885 889 remain = dict.fromkeys(self.changelog.nodemap)
886 890
887 891 # prune everything remote has from the tree
888 892 del remain[nullid]
889 893 remove = base.keys()
890 894 while remove:
891 895 n = remove.pop(0)
892 896 if n in remain:
893 897 del remain[n]
894 898 for p in self.changelog.parents(n):
895 899 remove.append(p)
896 900
897 901 # find every node whose parents have been pruned
898 902 subset = []
899 903 for n in remain:
900 904 p1, p2 = self.changelog.parents(n)
901 905 if p1 not in remain and p2 not in remain:
902 906 subset.append(n)
903 907
904 908 # this is the set of all roots we have to push
905 909 return subset
906 910
    def pull(self, remote, heads = None):
        """Pull changes from remote, optionally limited to the given
        heads.  Returns 1 when there is nothing to pull, otherwise the
        result of addchangegroup."""
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        if heads is None:
            cg = remote.changegroup(fetch)
        else:
            # only pull the subset reaching the requested heads
            cg = remote.changegroupsubset(fetch, heads)
        return self.addchangegroup(cg)
926 930
    def push(self, remote, force=False):
        """Push our outgoing changesets to the remote repository.

        Unless force is set, aborts (returning 1) when the remote has
        changesets we have not pulled, or when the push would create new
        remote heads.  On success returns the result of the remote
        addchangegroup call.
        """
        # lock the remote side for the duration of the push
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        # findincoming fills base with nodes common to both sides and
        # reports whether the remote has changesets we lack
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # the remote having fewer heads than us means the push would
            # create a new remote branch head
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
951 955
    def changegroupsubset(self, bases, heads):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changenodes; nodesbetween may prune
        both down to minimal equivalent sets.  The return value is a
        util.chunkbuffer wrapping a generator of raw changegroup chunks."""

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1213 1217
1214 1218 def changegroup(self, basenodes):
1215 1219 """Generate a changegroup of all nodes that we have that a recipient
1216 1220 doesn't.
1217 1221
1218 1222 This is much easier than the previous function as we can assume that
1219 1223 the recipient has any changenode we aren't sending them."""
1220 1224 cl = self.changelog
1221 1225 nodes = cl.nodesbetween(basenodes, None)[0]
1222 1226 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1223 1227
1224 1228 def identity(x):
1225 1229 return x
1226 1230
1227 1231 def gennodelst(revlog):
1228 1232 for r in xrange(0, revlog.count()):
1229 1233 n = revlog.node(r)
1230 1234 if revlog.linkrev(n) in revset:
1231 1235 yield n
1232 1236
1233 1237 def changed_file_collector(changedfileset):
1234 1238 def collect_changed_files(clnode):
1235 1239 c = cl.read(clnode)
1236 1240 for fname in c[3]:
1237 1241 changedfileset[fname] = 1
1238 1242 return collect_changed_files
1239 1243
1240 1244 def lookuprevlink_func(revlog):
1241 1245 def lookuprevlink(n):
1242 1246 return cl.node(revlog.linkrev(n))
1243 1247 return lookuprevlink
1244 1248
1245 1249 def gengroup():
1246 1250 # construct a list of all changed files
1247 1251 changedfiles = {}
1248 1252
1249 1253 for chnk in cl.group(nodes, identity,
1250 1254 changed_file_collector(changedfiles)):
1251 1255 yield chnk
1252 1256 changedfiles = changedfiles.keys()
1253 1257 changedfiles.sort()
1254 1258
1255 1259 mnfst = self.manifest
1256 1260 nodeiter = gennodelst(mnfst)
1257 1261 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1258 1262 yield chnk
1259 1263
1260 1264 for fname in changedfiles:
1261 1265 filerevlog = self.file(fname)
1262 1266 nodeiter = gennodelst(filerevlog)
1263 1267 nodeiter = list(nodeiter)
1264 1268 if nodeiter:
1265 1269 yield struct.pack(">l", len(fname) + 4) + fname
1266 1270 lookup = lookuprevlink_func(filerevlog)
1267 1271 for chnk in filerevlog.group(nodeiter, lookup):
1268 1272 yield chnk
1269 1273
1270 1274 yield struct.pack(">l", 0)
1271 1275
1272 1276 return util.chunkbuffer(gengroup())
1273 1277
    def addchangegroup(self, source):
        """Apply a changegroup read from the file-like object source.

        The stream is a sequence of length-prefixed chunk groups: the
        changelog group, the manifest group, then one (filename, group)
        pair per changed file, terminated by an empty chunk.  Runs the
        "changegroup" and "commit" hooks for the new changesets and
        returns 1 if the changegroup hook reports failure.
        """

        def getchunk():
            # a chunk is a ">l" big-endian byte count (which includes the
            # four count bytes themselves) followed by the payload; a count
            # of four or less terminates the current group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        def getgroup():
            # yield chunks until the group terminator
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            # linkrev mapper for changelog entries: the next local revision
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        def revmap(x):
            # linkrev mapper for manifest/file entries: the owning changeset
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # empty group: pretend nothing was added
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group (return values unused here; the
        # addgroup call performs the actual work)
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if changesets > 0:
            if not self.hook("changegroup",
                              node=hex(self.changelog.node(cor+1))):
                self.ui.warn(_("abort: changegroup hook returned failure!\n"))
                return 1

            # one "commit" hook invocation per newly added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("commit", node=hex(self.changelog.node(i)))

        return
1355 1359
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to the given changeset.

        allow  - permit an update that spans branches (a merge)
        force  - clobber/discard local changes where necessary
        choose - optional predicate limiting which files are touched
        moddirstate - when False, leave the dirstate untouched

        Returns 1 on abort, None on success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        # read both manifests (with exec flags) plus their ancestor's
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # (changed, added, deleted, unknown) in the working directory
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}      # files needing a 3-way merge: f -> (my, other, mode)
        get = {}        # files to fetch from the target: f -> node
        remove = []     # files to delete from the working directory

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate:
            wlock = self.wlock()

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n") % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files remaining in m2 exist only on the target side
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"): get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced updates never merge; just take the other version
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; anything else is reported
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
1591 1595
1592 1596 def merge3(self, fn, my, other):
1593 1597 """perform a 3-way merge in the working directory"""
1594 1598
1595 1599 def temp(prefix, node):
1596 1600 pre = "%s~%s." % (os.path.basename(fn), prefix)
1597 1601 (fd, name) = tempfile.mkstemp("", pre)
1598 1602 f = os.fdopen(fd, "wb")
1599 1603 self.wwrite(fn, fl.read(node), f)
1600 1604 f.close()
1601 1605 return name
1602 1606
1603 1607 fl = self.file(fn)
1604 1608 base = fl.ancestor(my, other)
1605 1609 a = self.wjoin(fn)
1606 1610 b = temp("base", base)
1607 1611 c = temp("other", other)
1608 1612
1609 1613 self.ui.note(_("resolving %s\n") % fn)
1610 1614 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1611 1615 (fn, short(my), short(other), short(base)))
1612 1616
1613 1617 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1614 1618 or "hgmerge")
1615 1619 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1616 1620 if r:
1617 1621 self.ui.warn(_("merging %s failed!\n") % fn)
1618 1622
1619 1623 os.unlink(b)
1620 1624 os.unlink(c)
1621 1625
1622 1626 def verify(self):
1623 1627 filelinkrevs = {}
1624 1628 filenodes = {}
1625 1629 changesets = revisions = files = 0
1626 1630 errors = [0]
1627 1631 neededmanifests = {}
1628 1632
1629 1633 def err(msg):
1630 1634 self.ui.warn(msg + "\n")
1631 1635 errors[0] += 1
1632 1636
1633 1637 seen = {}
1634 1638 self.ui.status(_("checking changesets\n"))
1635 1639 d = self.changelog.checksize()
1636 1640 if d:
1637 1641 err(_("changeset data short %d bytes") % d)
1638 1642 for i in range(self.changelog.count()):
1639 1643 changesets += 1
1640 1644 n = self.changelog.node(i)
1641 1645 l = self.changelog.linkrev(n)
1642 1646 if l != i:
1643 1647 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1644 1648 if n in seen:
1645 1649 err(_("duplicate changeset at revision %d") % i)
1646 1650 seen[n] = 1
1647 1651
1648 1652 for p in self.changelog.parents(n):
1649 1653 if p not in self.changelog.nodemap:
1650 1654 err(_("changeset %s has unknown parent %s") %
1651 1655 (short(n), short(p)))
1652 1656 try:
1653 1657 changes = self.changelog.read(n)
1654 1658 except KeyboardInterrupt:
1655 1659 self.ui.warn(_("interrupted"))
1656 1660 raise
1657 1661 except Exception, inst:
1658 1662 err(_("unpacking changeset %s: %s") % (short(n), inst))
1659 1663
1660 1664 neededmanifests[changes[0]] = n
1661 1665
1662 1666 for f in changes[3]:
1663 1667 filelinkrevs.setdefault(f, []).append(i)
1664 1668
1665 1669 seen = {}
1666 1670 self.ui.status(_("checking manifests\n"))
1667 1671 d = self.manifest.checksize()
1668 1672 if d:
1669 1673 err(_("manifest data short %d bytes") % d)
1670 1674 for i in range(self.manifest.count()):
1671 1675 n = self.manifest.node(i)
1672 1676 l = self.manifest.linkrev(n)
1673 1677
1674 1678 if l < 0 or l >= self.changelog.count():
1675 1679 err(_("bad manifest link (%d) at revision %d") % (l, i))
1676 1680
1677 1681 if n in neededmanifests:
1678 1682 del neededmanifests[n]
1679 1683
1680 1684 if n in seen:
1681 1685 err(_("duplicate manifest at revision %d") % i)
1682 1686
1683 1687 seen[n] = 1
1684 1688
1685 1689 for p in self.manifest.parents(n):
1686 1690 if p not in self.manifest.nodemap:
1687 1691 err(_("manifest %s has unknown parent %s") %
1688 1692 (short(n), short(p)))
1689 1693
1690 1694 try:
1691 1695 delta = mdiff.patchtext(self.manifest.delta(n))
1692 1696 except KeyboardInterrupt:
1693 1697 self.ui.warn(_("interrupted"))
1694 1698 raise
1695 1699 except Exception, inst:
1696 1700 err(_("unpacking manifest %s: %s") % (short(n), inst))
1697 1701
1698 1702 ff = [ l.split('\0') for l in delta.splitlines() ]
1699 1703 for f, fn in ff:
1700 1704 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1701 1705
1702 1706 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1703 1707
1704 1708 for m,c in neededmanifests.items():
1705 1709 err(_("Changeset %s refers to unknown manifest %s") %
1706 1710 (short(m), short(c)))
1707 1711 del neededmanifests
1708 1712
1709 1713 for f in filenodes:
1710 1714 if f not in filelinkrevs:
1711 1715 err(_("file %s in manifest but not in changesets") % f)
1712 1716
1713 1717 for f in filelinkrevs:
1714 1718 if f not in filenodes:
1715 1719 err(_("file %s in changeset but not in manifest") % f)
1716 1720
1717 1721 self.ui.status(_("checking files\n"))
1718 1722 ff = filenodes.keys()
1719 1723 ff.sort()
1720 1724 for f in ff:
1721 1725 if f == "/dev/null": continue
1722 1726 files += 1
1723 1727 fl = self.file(f)
1724 1728 d = fl.checksize()
1725 1729 if d:
1726 1730 err(_("%s file data short %d bytes") % (f, d))
1727 1731
1728 1732 nodes = { nullid: 1 }
1729 1733 seen = {}
1730 1734 for i in range(fl.count()):
1731 1735 revisions += 1
1732 1736 n = fl.node(i)
1733 1737
1734 1738 if n in seen:
1735 1739 err(_("%s: duplicate revision %d") % (f, i))
1736 1740 if n not in filenodes[f]:
1737 1741 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1738 1742 else:
1739 1743 del filenodes[f][n]
1740 1744
1741 1745 flr = fl.linkrev(n)
1742 1746 if flr not in filelinkrevs[f]:
1743 1747 err(_("%s:%s points to unexpected changeset %d")
1744 1748 % (f, short(n), flr))
1745 1749 else:
1746 1750 filelinkrevs[f].remove(flr)
1747 1751
1748 1752 # verify contents
1749 1753 try:
1750 1754 t = fl.read(n)
1751 1755 except KeyboardInterrupt:
1752 1756 self.ui.warn(_("interrupted"))
1753 1757 raise
1754 1758 except Exception, inst:
1755 1759 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1756 1760
1757 1761 # verify parents
1758 1762 (p1, p2) = fl.parents(n)
1759 1763 if p1 not in nodes:
1760 1764 err(_("file %s:%s unknown parent 1 %s") %
1761 1765 (f, short(n), short(p1)))
1762 1766 if p2 not in nodes:
1763 1767 err(_("file %s:%s unknown parent 2 %s") %
1764 1768 (f, short(n), short(p1)))
1765 1769 nodes[n] = 1
1766 1770
1767 1771 # cross-check
1768 1772 for node in filenodes[f]:
1769 1773 err(_("node %s in manifests not in %s") % (hex(node), f))
1770 1774
1771 1775 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1772 1776 (files, changesets, revisions))
1773 1777
1774 1778 if errors[0]:
1775 1779 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1776 1780 return 1
@@ -1,890 +1,888 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
17 17
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # feed the parents in sorted order so the digest is independent of
    # parent ordering, then mix in the text itself
    parents = [p1, p2]
    parents.sort()
    digest = sha.new(parents[0])
    digest.update(parents[1])
    digest.update(text)
    return digest.digest()
31 31
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    if len(text) >= 44:
        # long enough to be worth running through zlib; keep the
        # compressed form only if it is actually smaller
        packed = zlib.compress(text)
        if len(packed) <= len(text):
            return ("", packed)
    # too short or incompressible: store the text literally, tagging it
    # with 'u' unless a leading NUL already makes it self-identifying
    if text[0] == '\0':
        return ("", text)
    return ('u', text)
43 43
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    # the first byte tags how the chunk was stored
    tag = bin[0]
    if tag == '\0':
        # NUL-prefixed data was stored verbatim
        return bin
    if tag == 'x':
        # zlib stream (zlib output always starts with 'x')
        return zlib.decompress(bin)
    if tag == 'u':
        # 'u' marks uncompressed data with the tag byte prepended
        return bin[1:]
    raise RevlogError(_("unknown compression type %s") % tag)
52 52
53 53 indexformat = ">4l20s20s20s"
54 54
class lazyparser:
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data                       # raw index file contents
        self.s = struct.calcsize(indexformat)  # bytes per index record
        self.l = len(data)/self.s              # total number of records
        self.index = [None] * self.l           # parsed entries, filled lazily
        self.map = {nullid: -1}                # node -> rev, filled lazily
        self.all = 0                           # set once everything is parsed
        self.revlog = revlog

    def trunc(self, pos):
        # forget every record at or beyond byte offset pos (used by strip)
        self.l = pos/self.s

    def load(self, pos=None):
        """parse the 1000-entry block containing record pos (all if None)"""
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            # fully parsed: hand the plain structures back to the revlog
            # so further access bypasses the lazy wrappers entirely
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i  # e[6] is the record's nodeid
            i += 1
96 96
class lazyindex:
    """lazily-parsed stand-in for the index list

    Delegates storage to a lazyparser, forcing a parse of the relevant
    index block only when an entry that has not been parsed yet is
    requested.
    """
    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        # normalize a negative index before asking the parser for it
        if pos < 0:
            pos += len(self.p.index)
        self.p.load(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        entry = self.p.index[pos]
        if not entry:
            # not parsed yet: pull in the block holding this record
            entry = self.load(pos)
        return entry

    def __delitem__(self, pos):
        del self.p.index[pos]

    def append(self, e):
        self.p.index.append(e)

    def trunc(self, pos):
        self.p.trunc(pos)
116 116
class lazymap:
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # locate the index block that should contain node 'key' and
        # parse it
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        # NOTE(review): find() may match 'key' where it appears as a
        # parent hash or at a non-record-aligned offset; __getitem__
        # retries the map lookup afterwards and raises if the node
        # still isn't there -- confirm this covers all layouts
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        # a containment test needs the whole map parsed
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                # entry not parsed yet: load its block and retry
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
152 152
153 153 class RevlogError(Exception): pass
154 154
class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None  # (node, rev, text) of the last revision read

        try:
            i = self.opener(self.indexfile).read()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""  # a missing index file means an empty revlog

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

    # simple accessors over the index records
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.rev(node)][4:6]

    def start(self, rev): return self.index[rev][0]   # data file offset
    def length(self, rev): return self.index[rev][1]  # stored chunk size
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]    # delta chain base rev

    def reachable(self, rev, stop=None):
        """return a dict of the ancestors of rev, not walking past stop

        NOTE(review): despite the name, 'rev', 'stop' and the visit list
        hold node ids, not revision numbers.
        """
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                # don't descend below the stop revision
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list bases and heads after all the unreachable ones have been
        pruned. Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads[:]
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter.
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def heads(self, start=nullid):
        """return the list of all nodes that have no children
        if start is specified, only heads that are children of
        start will be returned"""
        # walk forward from start; a reachable rev stays a head until a
        # later rev names it as a parent
        reachable = {start: 1}
        heads = {start: 1}
        startrev = self.rev(start)

        for r in xrange(startrev + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                if pn in heads:
                    del heads[pn]
        return heads.keys()

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        # children can only appear after their parent in the revlog
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                # NOTE(review): both continue branches are redundant --
                # the inner loop advances regardless
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            # first, try id as a decimal revision number
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            # otherwise, treat id as a hex nodeid prefix
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) < 1: raise RevlogError(_("No match found"))
            return c[0]

        return None  # unreachable: both branches above return or raise

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            # node is stored as a full snapshot: compute the delta from
            # its predecessor on the fly
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            # stored as a delta already: read it straight off disk
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            # resume the delta chain from the cached revision instead of
            # rebuilding from the chain base
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        # apply the remaining deltas of the chain in order
        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        # check the reconstructed text against the stored nodeid
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            # already stored; the same change can arrive via two branches
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, l, base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        # append data first, then the index entry that points at it
        transaction.add(self.datafile, e[0])
        f = self.opener(self.datafile, "a")
        if data[0]:
            f.write(data[0])
        f.write(data[1])
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()  # NOTE(review): unused
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)  # NOTE(review): unused
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        # batch ancestors into groups of equal distance from root
        def generations(node):
            sg, s = None, {}
            for g,n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n:1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [ n for n in gx[1] if n in gy[1] ]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

    def group(self, nodelist, lookup, infocollect = None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]
        needed = dict.fromkeys(revs, 1)

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            if infocollect is not None:
                infocollect(n)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                    tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                # contiguous delta: reuse the stored chunk as-is
                d = chunks[b]

            # each group entry: nodeid, both parents, caller metadata,
            # then the delta, prefixed with the total length
            p = self.parents(n)
            meta = n + p[0] + p[1] + lookup(n)
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)

    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p1))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]

            # note: short-circuit keeps cdelta from being referenced
            # when it wasn't computed above (chain != prev)
            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                measure = len(text)
            else:
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node

    def strip(self, rev, minlink):
        """remove revision rev and everything after it from the revlog"""
        if self.count() == 0 or rev >= self.count():
            return

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][3]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        self.opener(self.datafile, "a").truncate(end)
        end = rev * struct.calcsize(indexformat)
        self.opener(self.indexfile, "a").truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        for p in self.index[rev:]:
            del self.nodemap[p[6]]
        del self.index[rev:]

        # truncating the lazyindex also truncates the lazymap.
        if isinstance(self.index, lazyindex):
            self.index.trunc(end)


    def checksize(self):
        """return the difference between expected and actual data size

        A negative result means the data file is longer than the index
        says it should be; positive means it is short (truncated).
        """
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)
        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            return expected - actual
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # no data file at all counts as zero length
                return 0
            raise
889 887
890 888
General Comments 0
You need to be logged in to leave comments. Login now