##// END OF EJS Templates
Changeset r1551:e793cbc8 (default branch) by Thomas Arendsen Hein:
fixes to "hg heads -r FOO".
@@ -1,2656 +1,2656 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from node import *
10 10 from i18n import gettext as _
11 11 demandload(globals(), "os re sys signal shutil imp urllib pdb")
12 12 demandload(globals(), "fancyopts ui hg util lock revlog")
13 13 demandload(globals(), "fnmatch hgweb mdiff random signal time traceback")
14 14 demandload(globals(), "errno socket version struct atexit sets bz2")
15 15
class UnknownCommand(Exception):
    """Raised when the requested command is not in the command table."""
class AmbiguousCommand(Exception):
    """Raised when a command abbreviation matches more than one command."""
20 20
def filterfiles(filters, files):
    """Return the members of files selected by filters.

    A file is selected if it is an exact member of filters, or if it
    lies under one of the filter entries treated as a directory
    (prefix match with a trailing slash appended).  Exact matches come
    first, in the original order of files, followed by the
    per-directory matches for each filter in turn.
    """
    # Use a dict for O(1) membership tests instead of scanning the
    # filter list once per file (dict.fromkeys keeps this 2.3-safe).
    filterset = dict.fromkeys(filters)
    l = [x for x in files if x in filterset]

    for t in filters:
        if t and t[-1] != "/":
            t += "/"
        l += [x for x in files if x.startswith(t)]
    return l
29 29
def relpath(repo, args):
    """Rebase args onto the repository root.

    When the current working directory is the repository root itself
    (getcwd() returns ''), args are returned untouched; otherwise each
    one is joined with the cwd and normalized.
    """
    cwd = repo.getcwd()
    if not cwd:
        return args
    return [util.normpath(os.path.join(cwd, a)) for a in args]
35 35
def matchpats(repo, cwd, pats=None, opts=None, head=''):
    """Build a command matcher for pats relative to cwd.

    Returns whatever util.cmdmatcher returns (files, matchfn,
    anypats).  An empty/None pats defaults to the current directory.
    The defaults were mutable ([] and {}), a Python anti-pattern;
    None sentinels behave identically for all callers.
    """
    opts = opts or {}
    return util.cmdmatcher(repo.root, cwd, pats or ['.'], opts.get('include'),
                           opts.get('exclude'), head)
39 39
def makewalk(repo, pats, opts, head=''):
    """Set up a repository walk for pats.

    Returns (files, matchfn, iterator); the iterator lazily yields
    (src, abspath, relpath, exact) tuples, where exact flags files
    that were named explicitly rather than matched by a pattern.
    """
    cwd = repo.getcwd()
    files, matchfn, anypats = matchpats(repo, cwd, pats, opts, head)
    exact = dict.fromkeys(files)
    def walker():
        for src, fn in repo.walk(files=files, match=matchfn):
            yield src, fn, util.pathto(cwd, fn), fn in exact
    return files, matchfn, walker()
48 48
def walk(repo, pats, opts, head=''):
    """Generator wrapper around makewalk that yields only the walk
    tuples, discarding the files/matchfn bookkeeping.

    Kept as a generator function so makewalk is not invoked until the
    caller starts iterating.
    """
    fns, matchfn, results = makewalk(repo, pats, opts, head)
    for tup in results:
        yield tup
53 53
def walkchangerevs(ui, repo, cwd, pats, opts):
    '''Iterate over files and the revs they changed in.

    Callers most commonly need to iterate backwards over the history
    it is interested in.  Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order.  Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an (iterator, getchange) pair.  The
    getchange function returns the changelog entry for a numeric
    revision.  The iterator yields 3-tuples.  They will be of one of
    the following forms:

    "window", incrementing, lastrev: stepping through a window,
    positive if walking forwards through revs, last rev in the
    sequence iterated over - use to reset state for the current window

    "add", rev, fns: out-of-order traversal of the given file names
    fns, which changed during revision rev - use to gather data for
    possible display

    "iter", rev, None: in-order traversal of the revs earlier iterated
    over with "add" - use to display data'''

    # An empty repository has nothing to walk.
    if repo.changelog.count() == 0:
        return [], False

    cwd = repo.getcwd()
    if not pats and cwd:
        # With no patterns, anchor bare --include/--exclude at the cwd.
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
    files, matchfn, anypats = matchpats(repo, (pats and cwd) or '',
                                        pats, opts)
    revs = map(int, revrange(ui, repo, opts['rev'] or ['tip:0']))
    wanted = {}          # revs to visit (dict used as a set)
    slowpath = anypats   # non-trivial patterns force scanning every cset
    window = 300         # revisions fetched per batch
    fncache = {}         # rev -> file names that changed in that rev

    # Per-rev cache of changelog entries.
    chcache = {}
    def getchange(rev):
        ch = chcache.get(rev)
        if ch is None:
            chcache[rev] = ch = repo.changelog.read(repo.lookup(str(rev)))
        return ch

    if not slowpath and not files:
        # No files, no patterns.  Display all revs.
        wanted = dict(zip(revs, revs))
    if not slowpath:
        # Only files, no patterns.  Check the history of each file.
        def filerevgen(filelog):
            # Yield the changelog revs that touch this file, newest
            # first, fetched window-at-a-time to bound memory use.
            for i in xrange(filelog.count() - 1, -1, -window):
                revs = []
                for j in xrange(max(0, i - window), i + 1):
                    revs.append(filelog.linkrev(filelog.node(j)))
                revs.reverse()
                for rev in revs:
                    yield rev

        minrev, maxrev = min(revs), max(revs)
        for file in files:
            filelog = repo.file(file)
            # A zero count may be a directory or deleted file, so
            # try to find matching entries on the slow path.
            if filelog.count() == 0:
                slowpath = True
                break
            for rev in filerevgen(filelog):
                if rev <= maxrev:
                    if rev < minrev:
                        # Past the requested range; stop early.
                        break
                    fncache.setdefault(rev, [])
                    fncache[rev].append(file)
                    wanted[rev] = 1
    if slowpath:
        # The slow path checks files modified in every changeset.
        def changerevgen():
            for i in xrange(repo.changelog.count() - 1, -1, -window):
                for j in xrange(max(0, i - window), i + 1):
                    # getchange(j)[3] is the changed-files list.
                    yield j, getchange(j)[3]

        for rev, changefiles in changerevgen():
            matches = filter(matchfn, changefiles)
            if matches:
                fncache[rev] = matches
                wanted[rev] = 1

    def iterate():
        # Announce each window, then the out-of-order gather ("add")
        # pass in ascending rev order, then the in-order display
        # ("iter") pass in the caller's requested order.
        for i in xrange(0, len(revs), window):
            yield 'window', revs[0] < revs[-1], revs[-1]
            nrevs = [rev for rev in revs[i:min(i+window, len(revs))]
                     if rev in wanted]
            srevs = list(nrevs)
            srevs.sort()
            for rev in srevs:
                fns = fncache.get(rev) or filter(matchfn, getchange(rev)[3])
                yield 'add', rev, fns
            for rev in nrevs:
                yield 'iter', rev, None
    return iterate(), getchange
158 158
# Separator between the two endpoints of a revision range spec.
revrangesep = ':'

def revrange(ui, repo, revs, revlog=None):
    """Yield revision as strings from a list of revision specifications."""
    if revlog is None:
        revlog = repo.changelog
    revcount = revlog.count()
    def fix(val, defval):
        # Resolve one endpoint spec to a numeric rev; an empty spec
        # falls back to defval.
        if not val:
            return defval
        try:
            num = int(val)
            if str(num) != val:
                # Reject forms int() accepts but we don't (e.g. "07").
                raise ValueError
            if num < 0: num += revcount    # negative revs count from tip
            if num < 0: num = 0
            elif num >= revcount:
                raise ValueError
        except ValueError:
            # Not a plain in-range number: try symbolic lookup, first
            # in the changelog, then in the revlog being ranged over.
            try:
                num = repo.changelog.rev(repo.lookup(val))
            except KeyError:
                try:
                    num = revlog.rev(revlog.lookup(val))
                except KeyError:
                    raise util.Abort(_('invalid revision identifier %s'), val)
        return num
    seen = {}   # revs already yielded (dict used as a set)
    for spec in revs:
        if spec.find(revrangesep) >= 0:
            start, end = spec.split(revrangesep, 1)
            start = fix(start, 0)
            end = fix(end, revcount - 1)
            # Walk downwards when the range is given as high:low.
            step = start > end and -1 or 1
            for rev in xrange(start, end+step, step):
                if rev in seen: continue
                seen[rev] = 1
                yield str(rev)
        else:
            rev = fix(spec, None)
            if rev in seen: continue
            seen[rev] = 1
            yield str(rev)
202 202
203 203 def make_filename(repo, r, pat, node=None,
204 204 total=None, seqno=None, revwidth=None, pathname=None):
205 205 node_expander = {
206 206 'H': lambda: hex(node),
207 207 'R': lambda: str(r.rev(node)),
208 208 'h': lambda: short(node),
209 209 }
210 210 expander = {
211 211 '%': lambda: '%',
212 212 'b': lambda: os.path.basename(repo.root),
213 213 }
214 214
215 215 try:
216 216 if node:
217 217 expander.update(node_expander)
218 218 if node and revwidth is not None:
219 219 expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
220 220 if total is not None:
221 221 expander['N'] = lambda: str(total)
222 222 if seqno is not None:
223 223 expander['n'] = lambda: str(seqno)
224 224 if total is not None and seqno is not None:
225 225 expander['n'] = lambda:str(seqno).zfill(len(str(total)))
226 226 if pathname is not None:
227 227 expander['s'] = lambda: os.path.basename(pathname)
228 228 expander['d'] = lambda: os.path.dirname(pathname) or '.'
229 229 expander['p'] = lambda: pathname
230 230
231 231 newname = []
232 232 patlen = len(pat)
233 233 i = 0
234 234 while i < patlen:
235 235 c = pat[i]
236 236 if c == '%':
237 237 i += 1
238 238 c = pat[i]
239 239 c = expander[c]()
240 240 newname.append(c)
241 241 i += 1
242 242 return ''.join(newname)
243 243 except KeyError, inst:
244 244 raise util.Abort(_("invalid format spec '%%%s' in output file name"),
245 245 inst.args[0])
246 246
def make_file(repo, r, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return the file object to use for pat.

    '-' or an empty pattern means stdout (write modes) or stdin (read
    modes); an object that already has the needed write/read method is
    passed straight through; anything else is treated as a
    make_filename pattern and opened with the given mode.
    """
    writing = 'w' in mode
    if not pat or pat == '-':
        return writing and sys.stdout or sys.stdin
    if writing and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    fname = make_filename(repo, r, pat, node, total, seqno, revwidth,
                          pathname)
    return open(fname, mode)
258 258
def dodiff(fp, ui, repo, node1, node2, files=None, match=util.always,
           changes=None, text=False):
    """Write a unified diff between two repository states to fp.

    node1/node2 name the states to compare; a false node2 means the
    working directory.  changes may carry a precomputed (c, a, d, u)
    tuple from repo.changes, otherwise it is computed here.  files
    restricts the diff to the given names.
    """
    if not changes:
        (c, a, d, u) = repo.changes(node1, node2, files, match=match)
    else:
        (c, a, d, u) = changes
    if files:
        # Narrow the (possibly precomputed) lists to the named files.
        c, a, d = map(lambda x: filterfiles(files, x), (c, a, d))

    if not c and not a and not d:
        return

    if node2:
        change = repo.changelog.read(node2)
        mmap2 = repo.manifest.read(change[0])
        date2 = util.datestr(change[2])
        def read(f):
            # "new" side comes from node2's manifest
            return repo.file(f).read(mmap2[f])
    else:
        # "new" side comes from the working directory
        date2 = util.datestr()
        if not node1:
            node1 = repo.dirstate.parents()[0]
        def read(f):
            return repo.wfile(f).read()

    if ui.quiet:
        r = None
    else:
        # Revision labels for the diff header: long hex if verbose.
        hexfunc = ui.verbose and hex or short
        r = [hexfunc(node) for node in [node1, node2] if node]

    change = repo.changelog.read(node1)
    mmap = repo.manifest.read(change[0])
    date1 = util.datestr(change[2])

    # c: both sides present; a: no old contents; d: no new contents.
    for f in c:
        to = None
        if f in mmap:
            to = repo.file(f).read(mmap[f])
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text))
    for f in a:
        to = None
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text))
    for f in d:
        to = repo.file(f).read(mmap[f])
        tn = None
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text))
308 308
def trimuser(ui, name, rev, revcache):
    """trim the name of the user who committed a change

    The shortened name is memoized in revcache, keyed by rev, so
    ui.shortuser runs at most once per revision.
    """
    if rev not in revcache or revcache[rev] is None:
        revcache[rev] = ui.shortuser(name)
    return revcache[rev]
315 315
def show_changeset(ui, repo, rev=0, changenode=None, brinfo=None):
    """show a single changeset or file revision"""
    log = repo.changelog
    # Accept either a rev number or a node and derive the other.
    if changenode is None:
        changenode = log.node(rev)
    elif not rev:
        rev = log.rev(changenode)

    if ui.quiet:
        # Quiet mode: just "rev:shortnode".
        ui.write("%d:%s\n" % (rev, short(changenode)))
        return

    changes = log.read(changenode)
    date = util.datestr(changes[2])

    # The null parent is shown only when debugging.
    parents = [(log.rev(p), ui.verbose and hex(p) or short(p))
               for p in log.parents(changenode)
               if ui.debugflag or p != nullid]
    if not ui.debugflag and len(parents) == 1 and parents[0][0] == rev-1:
        # Suppress the sole parent when it is simply the previous rev.
        parents = []

    if ui.verbose:
        ui.write(_("changeset: %d:%s\n") % (rev, hex(changenode)))
    else:
        ui.write(_("changeset: %d:%s\n") % (rev, short(changenode)))

    for tag in repo.nodetags(changenode):
        ui.status(_("tag: %s\n") % tag)
    for parent in parents:
        ui.write(_("parent: %d:%s\n") % parent)

    if brinfo and changenode in brinfo:
        br = brinfo[changenode]
        ui.write(_("branch: %s\n") % " ".join(br))

    ui.debug(_("manifest: %d:%s\n") % (repo.manifest.rev(changes[0]),
                                       hex(changes[0])))
    ui.status(_("user: %s\n") % changes[1])
    ui.status(_("date: %s\n") % date)

    if ui.debugflag:
        # Debug mode lists changed/added/removed files separately.
        files = repo.changes(log.parents(changenode)[0], changenode)
        for key, value in zip([_("files:"), _("files+:"), _("files-:")], files):
            if value:
                ui.note("%-12s %s\n" % (key, " ".join(value)))
    else:
        ui.note(_("files: %s\n") % " ".join(changes[3]))

    description = changes[4].strip()
    if description:
        if ui.verbose:
            ui.status(_("description:\n"))
            ui.status(description)
            ui.status("\n\n")
        else:
            # Only the first line of the description, as a summary.
            ui.status(_("summary: %s\n") % description.splitlines()[0])
    ui.status("\n")
373 373
def show_version(ui):
    """output version and copyright information"""
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % version.get_version())
    # Copyright notice goes through ui.status rather than ui.write,
    # presumably so quiet mode suppresses it — verify against ui.
    ui.status(_(
        "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
384 384
def help_(ui, cmd=None, with_version=False):
    """show help for a given command or all commands"""
    option_lists = []
    if cmd and cmd != 'shortlist':
        # Help for one specific command.
        if with_version:
            show_version(ui)
            ui.write('\n')
        aliases, i = find(cmd)
        # synopsis
        ui.write("%s\n\n" % i[2])

        # description (first line only when quiet)
        doc = i[0].__doc__
        if ui.quiet:
            doc = doc.splitlines(0)[0]
        ui.write("%s\n" % doc.rstrip())

        if not ui.quiet:
            # aliases
            if len(aliases) > 1:
                ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

            # options
            if i[1]:
                option_lists.append(("options", i[1]))

    else:
        # General help: banner plus a command list.
        # program name
        if ui.verbose or with_version:
            show_version(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if cmd == "shortlist":
            ui.status(_('basic commands (use "hg help" '
                        'for the full list or option "-v" for details):\n\n'))
        elif ui.verbose:
            ui.status(_('list of commands:\n\n'))
        else:
            ui.status(_('list of commands (use "hg help -v" '
                        'to show aliases and global options):\n\n'))

        h = {}      # command name -> first line of its docstring
        cmds = {}   # command name -> full alias spec without '^'
        for c, e in table.items():
            f = c.split("|")[0]
            # The shortlist only includes commands flagged with '^'.
            if cmd == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            # Hide debug commands unless --debug is in effect.
            if not ui.debugflag and f.startswith("debug"):
                continue
            d = ""
            if e[0].__doc__:
                d = e[0].__doc__.splitlines(0)[0].rstrip()
            h[f] = d
            cmds[f]=c.lstrip("^")

        fns = h.keys()
        fns.sort()
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands,h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

    # global options
    if ui.verbose:
        option_lists.append(("global options", globalopts))

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s:\n" % title, None))
        for shortopt, longopt, default, desc in options:
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                              "%s%s" % (desc,
                                        default and _(" (default: %s)") % default
                                        or "")))

    if opt_output:
        # Align descriptions on the widest option column.
        opts_len = max([len(line[0]) for line in opt_output if line[1]])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
476 476
477 477 # Commands start here, listed alphabetically
478 478
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the repository.

    The files will be added to the repository at the next commit.

    If no names are given, add all files in the current directory and
    its subdirectories.
    """

    names = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        wanted = False
        if exact:
            # Explicitly named files are always scheduled; only
            # mention them in verbose mode.
            wanted = True
            if ui.verbose: ui.status(_('adding %s\n') % rel)
        elif repo.dirstate.state(abs) == '?':
            # Pattern matches: schedule only untracked files.
            wanted = True
            ui.status(_('adding %s\n') % rel)
        if wanted:
            names.append(abs)
    repo.add(names)
499 499
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the repository.

    New files are ignored if they match any of the patterns in .hgignore. As
    with add, these changes take effect at the next commit.
    """
    # Renamed from add/remove to avoid shadowing the module-level
    # add() and remove() commands within this function.
    added, removed = [], []
    for src, abs, rel, exact in walk(repo, pats, opts):
        chatty = ui.verbose or not exact
        if src == 'f' and repo.dirstate.state(abs) == '?':
            # An untracked file on disk: schedule it for addition.
            added.append(abs)
            if chatty:
                ui.status(_('adding %s\n') % rel)
        if repo.dirstate.state(abs) != 'r' and not os.path.exists(rel):
            # Tracked but gone from the working dir: schedule removal.
            removed.append(abs)
            if chatty:
                ui.status(_('removing %s\n') % rel)
    repo.add(added)
    repo.remove(removed)
520 520
def annotate(ui, repo, *pats, **opts):
    """show changeset information per file line

    List changes in files, showing the revision id responsible for each line

    This command is useful to discover who did a change or when a change took
    place.

    Without the -a option, annotate will avoid processing files it
    detects as binary. With -a, annotate will generate an annotation
    anyway, probably with undesirable results.
    """
    def getnode(rev):
        # short node id of the changeset that introduced the line
        return short(repo.changelog.node(rev))

    ucache = {}
    def getname(rev):
        # committer name, trimmed and memoized per rev in ucache
        cl = repo.changelog.read(repo.changelog.node(rev))
        return trimuser(ui, cl[1], rev, ucache)

    dcache = {}
    def getdate(rev):
        # commit date string, memoized per rev in dcache
        datestr = dcache.get(rev)
        if datestr is None:
            cl = repo.changelog.read(repo.changelog.node(rev))
            datestr = dcache[rev] = util.datestr(cl[2])
        return datestr

    if not pats:
        raise util.Abort(_('at least one file name or pattern required'))

    # Map each annotation option to the function producing its column.
    opmap = [['user', getname], ['number', str], ['changeset', getnode],
             ['date', getdate]]
    if not opts['user'] and not opts['changeset'] and not opts['date']:
        # Default to showing the revision number column.
        opts['number'] = 1

    if opts['rev']:
        node = repo.changelog.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]
    change = repo.changelog.read(node)
    mmap = repo.manifest.read(change[0])

    for src, abs, rel, exact in walk(repo, pats, opts):
        if abs not in mmap:
            ui.warn(_("warning: %s is not in the repository!\n") % rel)
            continue

        f = repo.file(abs)
        if not opts['text'] and util.binary(f.read(mmap[abs])):
            # Skip binary files unless -a/--text was given.
            ui.write(_("%s: binary file\n") % rel)
            continue

        lines = f.annotate(mmap[abs])
        pieces = []

        # Build one right-aligned column per requested option.
        for o, f in opmap:
            if opts[o]:
                l = [f(n) for n, dummy in lines]
                if l:
                    m = max(map(len, l))
                    pieces.append(["%*s" % (m, x) for x in l])

        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
587 587
def bundle(ui, repo, fname, dest="default-push", **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting all changesets
    not found in the other repository.

    This file can then be transferred using conventional means and
    applied to another repository with the unbundle command. This is
    useful when native push and pull are not available or when
    exporting an entire repository is undesirable. The standard file
    extension is ".hg".

    Unlike import/export, this exactly preserves all changeset
    contents including permissions, rename data, and revision history.
    """
    f = open(fname, "wb")
    dest = ui.expandpath(dest, repo.root)
    other = hg.repository(ui, dest)
    # Changesets we have that the destination lacks.
    o = repo.findoutgoing(other)
    cg = repo.changegroup(o)

    try:
        # "HG10" is the bundle magic; the payload is bz2-compressed.
        f.write("HG10")
        z = bz2.BZ2Compressor(9)
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            f.write(z.compress(chunk))
        f.write(z.flush())
    except:
        # Do not leave a half-written bundle behind; re-raise.
        os.unlink(fname)
        raise
    # NOTE(review): f is never explicitly closed on success; this
    # relies on the file object being garbage collected.
621 621
def cat(ui, repo, file1, *pats, **opts):
    """output the latest or given revisions of files

    Print the specified files as they were at the given revision.
    If no revision is given then the tip is used.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repo root
    %p root-relative path name of file being printed
    """
    mf = {}
    rev = opts['rev']
    if rev:
        # Resolve the manifest of the requested revision up front.
        change = repo.changelog.read(repo.lookup(rev))
        mf = repo.manifest.read(change[0])
    for src, abs, rel, exact in walk(repo, (file1,) + pats, opts):
        r = repo.file(abs)
        if rev:
            try:
                n = mf[abs]
            except (hg.RepoError, KeyError):
                # Not in the manifest: fall back to a filelog lookup.
                try:
                    n = r.lookup(rev)
                except KeyError, inst:
                    raise util.Abort(_('cannot find file %s in rev %s'), rel, rev)
        else:
            n = r.tip()
        fp = make_file(repo, r, opts['output'], node=n, pathname=abs)
        fp.write(r.read(n))
655 655
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem. Some filesystems,
    such as AFS, implement hardlinking incorrectly, but do not report
    errors. In these cases, use the --pull option to avoid
    hardlinking.
    """
    if dest is None:
        dest = os.path.basename(os.path.normpath(source))

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists"), dest)

    dest = os.path.realpath(dest)

    class Dircleanup:
        # Removes the partially created destination unless close() is
        # called, which only happens when the clone succeeds.
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
            os.mkdir(dir_)
        def close(self):
            self.dir_ = None
        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    if not os.path.exists(source):
        # Not a local path: expand configured/symbolic path names.
        source = ui.expandpath(source)

    d = Dircleanup(dest)
    abspath = source
    other = hg.repository(ui, source)

    # Direct file copy is only attempted for local repositories
    # (dev() != -1) doing a full clone (no --pull, no --rev).
    copy = False
    if other.dev() != -1:
        abspath = os.path.abspath(source)
        if not opts['pull'] and not opts['rev']:
            copy = True

    if copy:
        try:
            # we use a lock here because if we race with commit, we
            # can end up with extra data in the cloned revlogs that's
            # not pointed to by changesets, thus causing verify to
            # fail
            l1 = lock.lock(os.path.join(source, ".hg", "lock"))
        except OSError:
            # Source cannot be locked; fall back to a pull-style clone.
            copy = False

    if copy:
        # we lock here to avoid premature writing to the target
        os.mkdir(os.path.join(dest, ".hg"))
        l2 = lock.lock(os.path.join(dest, ".hg", "lock"))

        files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
        for f in files.split():
            src = os.path.join(source, ".hg", f)
            dst = os.path.join(dest, ".hg", f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                # Missing pieces (e.g. an absent .d file) are fine.
                if inst.errno != errno.ENOENT: raise

        repo = hg.repository(ui, dest)

    else:
        revs = None
        if opts['rev']:
            if not other.local():
                raise util.Abort("clone -r not supported yet for remote repositories.")
            else:
                revs = [other.lookup(rev) for rev in opts['rev']]
        repo = hg.repository(ui, dest, create=1)
        repo.pull(other, heads = revs)

    # Record the source as the default path for future pulls.
    f = repo.opener("hgrc", "w", text=True)
    f.write("[paths]\n")
    f.write("default = %s\n" % abspath)
    f.close()

    if not opts['noupdate']:
        update(ui, repo)

    d.close()
755 755
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    from the root of the repository will be committed.

    The HGEDITOR or EDITOR environment variables are used to start an
    editor to add a commit comment.
    """
    message = opts['message']
    logfile = opts['logfile']

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        # Read the commit message from a file ('-' means stdin).
        try:
            if logfile == '-':
                message = sys.stdin.read()
            else:
                message = open(logfile).read()
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))

    if opts['addremove']:
        addremove(ui, repo, *pats, **opts)
    cwd = repo.getcwd()
    if not pats and cwd:
        # Anchor bare --include/--exclude patterns at the current dir.
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
    fns, match, anypats = matchpats(repo, (pats and repo.getcwd()) or '',
                                    pats, opts)
    if pats:
        # Explicit patterns: commit exactly the matching modified,
        # added, and dirstate-removed files.
        c, a, d, u = repo.changes(files=fns, match=match)
        files = c + a + [fn for fn in d if repo.dirstate.state(fn) == 'r']
    else:
        # No patterns: let repo.commit pick up all outstanding changes.
        files = []
    try:
        repo.commit(files, message, opts['user'], opts['date'], match)
    except ValueError, inst:
        raise util.Abort(str(inst))
800 800
801 801 def docopy(ui, repo, pats, opts):
802 802 cwd = repo.getcwd()
803 803 errors = 0
804 804 copied = []
805 805 targets = {}
806 806
807 807 def okaytocopy(abs, rel, exact):
808 808 reasons = {'?': _('is not managed'),
809 809 'a': _('has been marked for add')}
810 810 reason = reasons.get(repo.dirstate.state(abs))
811 811 if reason:
812 812 if exact: ui.warn(_('%s: not copying - file %s\n') % (rel, reason))
813 813 else:
814 814 return True
815 815
816 816 def copy(abssrc, relsrc, target, exact):
817 817 abstarget = util.canonpath(repo.root, cwd, target)
818 818 reltarget = util.pathto(cwd, abstarget)
819 819 prevsrc = targets.get(abstarget)
820 820 if prevsrc is not None:
821 821 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
822 822 (reltarget, abssrc, prevsrc))
823 823 return
824 824 elif os.path.exists(reltarget):
825 825 if opts['force']:
826 826 os.unlink(reltarget)
827 827 else:
828 828 ui.warn(_('%s: not overwriting - file exists\n') %
829 829 reltarget)
830 830 return
831 831 if ui.verbose or not exact:
832 832 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
833 833 if not opts['after']:
834 834 targetdir = os.path.dirname(reltarget) or '.'
835 835 if not os.path.isdir(targetdir):
836 836 os.makedirs(targetdir)
837 837 try:
838 838 shutil.copyfile(relsrc, reltarget)
839 839 shutil.copymode(relsrc, reltarget)
840 840 except shutil.Error, inst:
841 841 raise util.Abort(str(inst))
842 842 except IOError, inst:
843 843 if inst.errno == errno.ENOENT:
844 844 ui.warn(_('%s: deleted in working copy\n') % relsrc)
845 845 else:
846 846 ui.warn(_('%s: cannot copy - %s\n') %
847 847 (relsrc, inst.strerror))
848 848 errors += 1
849 849 return
850 850 targets[abstarget] = abssrc
851 851 repo.copy(abssrc, abstarget)
852 852 copied.append((abssrc, relsrc, exact))
853 853
854 854 pats = list(pats)
855 855 if not pats:
856 856 raise util.Abort(_('no source or destination specified'))
857 857 if len(pats) == 1:
858 858 raise util.Abort(_('no destination specified'))
859 859 dest = pats.pop()
860 860 destdirexists = os.path.isdir(dest)
861 861 if (len(pats) > 1 or not os.path.exists(pats[0])) and not destdirexists:
862 862 raise util.Abort(_('with multiple sources, destination must be an '
863 863 'existing directory'))
864 864
865 865 for pat in pats:
866 866 if os.path.isdir(pat):
867 867 if destdirexists:
868 868 striplen = len(os.path.split(pat)[0])
869 869 else:
870 870 striplen = len(pat)
871 871 if striplen:
872 872 striplen += len(os.sep)
873 873 targetpath = lambda p: os.path.join(dest, p[striplen:])
874 874 elif destdirexists:
875 875 targetpath = lambda p: os.path.join(dest, os.path.basename(p))
876 876 else:
877 877 targetpath = lambda p: dest
878 878 for tag, abssrc, relsrc, exact in walk(repo, [pat], opts):
879 879 if okaytocopy(abssrc, relsrc, exact):
880 880 copy(abssrc, relsrc, targetpath(abssrc), exact)
881 881
882 882 if errors:
883 883 ui.warn(_('(consider using --after)\n'))
884 884 if len(copied) == 0:
885 885 raise util.Abort(_('no files to copy'))
886 886 return errors, copied
887 887
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record copied files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    # All the work happens in docopy; its error count is the command's
    # return value, and the copied-file list is unused here.
    return docopy(ui, repo, pats, opts)[0]
907 907
def debugancestor(ui, index, rev1, rev2):
    """find the ancestor revision of two revisions in a given index"""
    # Open the revlog index file directly; no repository is needed.
    r = revlog.revlog(util.opener(os.getcwd()), index, "")
    a = r.ancestor(r.lookup(rev1), r.lookup(rev2))
    ui.write("%d:%s\n" % (r.rev(a), hex(a)))
913 913
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    m1n = repo.changelog.read(parent1)[0]
    m2n = repo.changelog.read(parent2)[0]
    m1 = repo.manifest.read(m1n)
    m2 = repo.manifest.read(m2n)
    errors = 0
    # Cross-check each dirstate entry's state letter ('n', 'r', 'a',
    # 'm') against the two parent manifests.
    for f in dc:
        state = repo.dirstate.state(f)
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # And the reverse: every file in manifest1 must be tracked with an
    # expected state.
    for f in m1:
        state = repo.dirstate.state(f)
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        raise util.Abort(_(".hg/dirstate inconsistent with current parent's manifest"))
945 945
def debugconfig(ui):
    """show combined config settings from all hgrc files"""
    # Opening a repository merges .hg/hgrc into the config; it is fine
    # for this command to run outside a repository as well.
    try:
        repo = hg.repository(ui)
    except hg.RepoError:
        pass
    for sect, key, val in ui.walkconfig():
        ui.write('%s.%s=%s\n' % (sect, key, val))
954 954
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    # A missing second parent defaults to the null revision.
    second = rev2 or hex(nullid)
    repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(second))
966 966
def debugstate(ui, repo):
    """show the contents of the current dirstate"""
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    # Each dirstate entry is indexed as (state, mode, size, mtime);
    # print one line per tracked file in sorted filename order.
    for file_ in keys:
        ui.write("%c %3o %10d %s %s\n"
                 % (dc[file_][0], dc[file_][1] & 0777, dc[file_][2],
                    time.strftime("%x %X",
                                  time.localtime(dc[file_][3])), file_))
    # Also dump recorded copy sources (destination maps to source).
    for f in repo.dirstate.copies:
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copies[f], f))
980 980
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # A data file "foo.d" pairs with the index file "foo.i"; strip the
    # two-character ".d" suffix to derive the index name.
    r = revlog.revlog(util.opener(os.getcwd()), file_[:-2] + ".i", file_)
    try:
        ui.write(r.revision(r.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s'), rev)
988 988
def debugindex(ui, file_):
    """dump the contents of an index file"""
    rl = revlog.revlog(util.opener(os.getcwd()), file_, "")
    # Header matches the columns printed per revision below.
    ui.write(" rev offset length base linkrev" +
             " nodeid p1 p2\n")
    for rev in range(rl.count()):
        entry = rl.index[rev]
        # entry fields used: 0-3 are offset/length/base/linkrev,
        # 6 is the node, 4 and 5 are the parents (see header order).
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
            rev, entry[0], entry[1], entry[2], entry[3],
            short(entry[6]), short(entry[4]), short(entry[5])))
999 999
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    rl = revlog.revlog(util.opener(os.getcwd()), file_, "")
    ui.write("digraph G {\n")
    for rev in range(rl.count()):
        entry = rl.index[rev]
        # Emit one edge per parent; the second parent is skipped when null.
        ui.write("\t%d -> %d\n" % (rl.rev(entry[4]), rev))
        if entry[5] != nullid:
            ui.write("\t%d -> %d\n" % (rl.rev(entry[5]), rev))
    ui.write("}\n")
1010 1010
def debugrename(ui, repo, file, rev=None):
    """dump rename information"""
    # NOTE(review): the parameter name shadows the builtin file().
    r = repo.file(relpath(repo, [file])[0])
    if rev:
        try:
            # assume all revision numbers are for changesets
            n = repo.lookup(rev)
            change = repo.changelog.read(n)
            m = repo.manifest.read(change[0])
            n = m[relpath(repo, [file])[0]]
        except (hg.RepoError, KeyError):
            # fall back to treating rev as a filelog revision
            n = r.lookup(rev)
    else:
        n = r.tip()
    m = r.renamed(n)
    if m:
        ui.write(_("renamed from %s:%s\n") % (m[0], hex(m[1])))
    else:
        ui.write(_("not renamed\n"))
1030 1030
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matched = list(walk(repo, pats, opts))
    if not matched:
        return
    # Size the columns to the longest absolute and relative names.
    widest_abs = max([len(abs) for (src, abs, rel, exact) in matched])
    widest_rel = max([len(rel) for (src, abs, rel, exact) in matched])
    fmt = '%%s %%-%ds %%-%ds %%s' % (widest_abs, widest_rel)
    for src, abs, rel, exact in matched:
        line = fmt % (src, abs, rel, exact and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
1042 1042
def diff(ui, repo, *pats, **opts):
    """diff working directory (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.
    """
    node1, node2 = None, None
    revs = [repo.lookup(x) for x in opts['rev']]

    # Up to two -r revisions select the diff endpoints; a None
    # endpoint means "the working directory".
    if len(revs) > 0:
        node1 = revs[0]
    if len(revs) > 1:
        node2 = revs[1]
    if len(revs) > 2:
        raise util.Abort(_("too many revisions to diff"))

    fns, matchfn, anypats = matchpats(repo, repo.getcwd(), pats, opts)

    dodiff(sys.stdout, ui, repo, node1, node2, fns, match=matchfn,
           text=opts['text'])
1074 1074
def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
    # Write one changeset in "hg export" format, either to stdout or
    # to a file named by expanding the -o format string.
    node = repo.lookup(changeset)
    prev, other = repo.changelog.parents(node)
    change = repo.changelog.read(node)

    fp = make_file(repo, repo.changelog, opts['output'],
                   node=node, total=total, seqno=seqno,
                   revwidth=revwidth)
    if fp != sys.stdout:
        ui.note("%s\n" % fp.name)

    # Patch header: user, node, first parent, and second parent only
    # when this is a merge changeset.
    fp.write("# HG changeset patch\n")
    fp.write("# User %s\n" % change[1])
    fp.write("# Node ID %s\n" % hex(node))
    fp.write("# Parent %s\n" % hex(prev))
    if other != nullid:
        fp.write("# Parent %s\n" % hex(other))
    # Commit message, then the diff against the first parent.
    fp.write(change[4].rstrip())
    fp.write("\n\n")

    dodiff(fp, ui, repo, prev, node, text=opts['text'])
    if fp != sys.stdout:
        fp.close()
1098 1098
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent and commit comment.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = list(revrange(ui, repo, changesets))
    total = len(revs)
    # Width of the widest revision string, used for zero-padding.
    revwidth = max([len(rev_) for rev_ in revs])
    if total > 1:
        ui.note(_("Exporting patches:\n"))
    else:
        ui.note(_("Exporting patch:\n"))
    # Sequence numbers handed to doexport are 1-based.
    for pos, cset in enumerate(revs):
        doexport(ui, repo, cset, pos + 1, total, revwidth, opts)
1133 1133
def forget(ui, repo, *pats, **opts):
    """don't add the specified files on the next commit

    Undo an 'hg add' scheduled for the next commit.
    """
    to_forget = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # Only files in the 'a' (scheduled for add) state qualify.
        if repo.dirstate.state(abs) != 'a':
            continue
        to_forget.append(abs)
        if ui.verbose or not exact:
            ui.status(_('forgetting %s\n') % rel)
    repo.forget(to_forget)
1146 1146
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts['ignore_case']:
        reflags |= re.I
    regexp = re.compile(pattern, reflags)
    sep, eol = ':', '\n'
    if opts['print0']:
        sep = eol = '\0'

    # Cache filelogs so each file is opened at most once.
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # Yield (linenum, colstart, colend, line) for every regexp
        # match inside body.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            lend = body.find('\n', mend)
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
            begin = lend + 1

    class linestate:
        # A matched line with its position.  Equality and hashing use
        # only the line text, so identical lines compare equal across
        # revisions even if the match moved.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __eq__(self, other):
            return self.line == other.line
        def __hash__(self):
            return hash(self.line)

    # matches[rev][fn] maps linestate -> linestate for that file/rev.
    matches = {}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, {})
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m[s] = s

    prev = {}
    ucache = {}
    def display(fn, rev, states, prevstates):
        # Print the match-status changes between two revisions of fn:
        # the symmetric difference of their matched-line sets.
        diff = list(sets.Set(states).symmetric_difference(sets.Set(prevstates)))
        diff.sort(lambda x, y: cmp(x.linenum, y.linenum))
        counts = {'-': 0, '+': 0}
        filerevmatches = {}
        for l in diff:
            if incrementing or not opts['all']:
                change = ((l in prevstates) and '-') or '+'
                r = rev
            else:
                change = ((l in states) and '-') or '+'
                r = prev[fn]
            cols = [fn, str(rev)]
            if opts['line_number']: cols.append(str(l.linenum))
            if opts['all']: cols.append(change)
            if opts['user']: cols.append(trimuser(ui, getchange(rev)[1], rev,
                                                 ucache))
            if opts['files_with_matches']:
                # Only one output line per (file, rev) pair.
                c = (fn, rev)
                if c in filerevmatches: continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            counts[change] += 1
        return counts['+'], counts['-']

    fstate = {}
    skip = {}
    changeiter, getchange = walkchangerevs(ui, repo, repo.getcwd(), pats, opts)
    count = 0
    incrementing = False
    for st, rev, fns in changeiter:
        if st == 'window':
            # New revision window; rev carries the walk direction flag.
            incrementing = rev
            matches.clear()
        elif st == 'add':
            # Collect matches for every interesting file at this rev.
            change = repo.changelog.read(repo.lookup(str(rev)))
            mf = repo.manifest.read(change[0])
            matches[rev] = {}
            for fn in fns:
                if fn in skip: continue
                fstate.setdefault(fn, {})
                try:
                    grepbody(fn, rev, getfile(fn).read(mf[fn]))
                except KeyError:
                    # File absent from this revision's manifest.
                    pass
        elif st == 'iter':
            states = matches[rev].items()
            states.sort()
            for fn, m in states:
                if fn in skip: continue
                if incrementing or not opts['all'] or fstate[fn]:
                    pos, neg = display(fn, rev, m, fstate[fn])
                    count += pos + neg
                    if pos and not opts['all']:
                        # Without --all, stop after the first match.
                        skip[fn] = True
                fstate[fn] = m
                prev[fn] = rev

    if not incrementing:
        # Walking newest-to-oldest: flush the remaining per-file state.
        fstate = fstate.items()
        fstate.sort()
        for fn, state in fstate:
            if fn in skip: continue
            display(fn, rev, {}, state)
    return (count == 0 and 1) or 0
1279 1279
def heads(ui, repo, **opts):
    """show current repository heads

    Show all repository head changesets.

    Repository "heads" are changesets that don't have children
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.
    """
    if opts['rev']:
        # Restrict output to heads reachable from the given revision.
        # (The previous code referenced an undefined name "rev" here,
        # which raised NameError for "hg heads -r FOO".)
        heads = repo.heads(repo.lookup(opts['rev']))
    else:
        heads = repo.heads()
    br = None
    if opts['branches']:
        br = repo.branchlookup(heads)
    for n in heads:
        show_changeset(ui, repo, changenode=n, brinfo=br)
1298 1298
def identify(ui, repo):
    """print information about the working copy

    Print a short summary of the current state of the repo.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, followed by a list of tags for this revision.
    """
    parents = [p for p in repo.dirstate.parents() if p != nullid]
    if not parents:
        ui.write(_("unknown\n"))
        return

    # Full hashes with -v, short ones otherwise.
    hexfunc = ui.verbose and hex or short
    # (changed, added, deleted, unknown) relative to the dirstate parents.
    (c, a, d, u) = repo.changes()
    output = ["%s%s" % ('+'.join([hexfunc(parent) for parent in parents]),
                        (c or a or d) and "+" or "")]

    if not ui.quiet:
        # multiple tags for a single parent separated by '/'
        parenttags = ['/'.join(tags)
                      for tags in map(repo.nodetags, parents) if tags]
        # tags for multiple parents separated by ' + '
        if parenttags:
            output.append(' + '.join(parenttags))

    ui.write("%s\n" % ' '.join(output))
1327 1327
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    If a patch looks like a mail message (its first line starts with
    "From " or looks like an RFC822 header), it will not be applied
    unless the -f option is used. The importer neither parses nor
    discards mail headers, so use -f only to override the "mailness"
    safety check, not to import a real mail message.
    """
    patches = (patch1,) + patches

    if not opts['force']:
        # Refuse to apply patches over local modifications.
        (c, a, d, u) = repo.changes()
        if c or a or d:
            raise util.Abort(_("outstanding uncommitted changes"))

    d = opts["base"]
    strip = opts["strip"]

    # Lines that look like a mail header ("From " or "Word:").
    mailre = re.compile(r'(?:From |[\w-]+:)')

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'(?:Index:[ \t]|diff[ \t]|RCS file: |' +
                        'retrieving revision [0-9]+(\.[0-9]+)*$|' +
                        '(---|\*\*\*)[ \t])')

    for patch in patches:
        ui.status(_("applying %s\n") % patch)
        pf = os.path.join(d, patch)

        # Scan the header portion of the patch for the commit message
        # and (for hg export output) the user, stopping at the diff.
        message = []
        user = None
        hgpatch = False
        for line in file(pf):
            line = line.rstrip()
            if (not message and not hgpatch and
                mailre.match(line) and not opts['force']):
                if len(line) > 35: line = line[:32] + '...'
                raise util.Abort(_('first line looks like a '
                                   'mail header: ') + line)
            if diffre.match(line):
                break
            elif hgpatch:
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                    ui.debug(_('User: %s\n') % user)
                elif not line.startswith("# ") and line:
                    message.append(line)
                    hgpatch = False
            elif line == '# HG changeset patch':
                hgpatch = True
                message = []  # We may have collected garbage
            else:
                message.append(line)

        # make sure message isn't empty
        if not message:
            message = _("imported patch %s\n") % patch
        else:
            message = "%s\n" % '\n'.join(message)
        ui.debug(_('message:\n%s\n') % message)

        files = util.patch(strip, pf, ui)

        if len(files) > 0:
            addremove(ui, repo, *files)
        repo.commit(files, message, user)
1402 1402
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified repo or the default
    pull repo. These are the changesets that would be pulled if a pull
    was requested.

    Currently only local repositories are supported.
    """
    source = ui.expandpath(source, repo.root)
    other = hg.repository(ui, source)
    if not other.local():
        raise util.Abort(_("incoming doesn't work for remote repositories yet"))
    o = repo.findincoming(other)
    if not o:
        return
    # Expand the incoming roots into the full list of new changesets.
    o = other.changelog.nodesbetween(o)[0]
    if opts['newest_first']:
        o.reverse()
    for n in o:
        parents = [p for p in other.changelog.parents(n) if p != nullid]
        # --no-merges hides changesets with two parents.
        if opts['no_merges'] and len(parents) == 2:
            continue
        show_changeset(ui, other, changenode=n)
        if opts['patch']:
            prev = (parents and parents[0]) or nullid
            dodiff(ui, ui, other, prev, n)
            ui.write("\n")
1431 1431
def init(ui, dest="."):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.
    """
    # Create the target directory first; hg.repository only creates
    # the .hg control area inside it.
    if not os.path.exists(dest):
        os.mkdir(dest)
    hg.repository(ui, dest, create=1)
1443 1443
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the current directory and its
    subdirectories. To search an entire repository, move to the root
    of the repository.

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    # NUL-terminate lines with -0 for safe consumption by xargs.
    terminator = opts['print0'] and '\0' or '\n'

    for src, abs, rel, exact in walk(repo, pats, opts, '(?:.*/|)'):
        # '?' means the file is not under version control: skip it.
        if repo.dirstate.state(abs) == '?':
            continue
        if opts['fullpath']:
            ui.write(os.path.join(repo.root, abs), terminator)
        else:
            ui.write(rel, terminator)
1471 1471
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire project.

    By default this command outputs: changeset id and hash, tags,
    parents, user, date and time, and a summary for each commit. The
    -v switch adds some more detail, such as changed files, manifest
    hashes or message signatures.
    """
    class dui:
        # Implement and delegate some ui protocol. Save hunks of
        # output for later display in the desired order.
        def __init__(self, ui):
            self.ui = ui
            self.hunk = {}
        def bump(self, rev):
            # Start buffering output for a new revision.
            self.rev = rev
            self.hunk[rev] = []
        def note(self, *args):
            if self.verbose:
                self.write(*args)
        def status(self, *args):
            if not self.quiet:
                self.write(*args)
        def write(self, *args):
            self.hunk[self.rev].append(args)
        def debug(self, *args):
            if self.debugflag:
                self.write(*args)
        def __getattr__(self, key):
            # Everything else falls through to the real ui.
            return getattr(self.ui, key)
    cwd = repo.getcwd()
    if not pats and cwd:
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
    changeiter, getchange = walkchangerevs(ui, repo, (pats and cwd) or '',
                                           pats, opts)
    for st, rev, fns in changeiter:
        if st == 'window':
            # New window of revisions: fresh output buffer.
            du = dui(ui)
        elif st == 'add':
            du.bump(rev)
            changenode = repo.changelog.node(rev)
            parents = [p for p in repo.changelog.parents(changenode)
                       if p != nullid]
            if opts['no_merges'] and len(parents) == 2:
                continue
            if opts['only_merges'] and len(parents) != 2:
                continue

            br = None
            if opts['keyword']:
                # -k: require every keyword in user, description, or
                # the first twenty changed files.
                changes = repo.changelog.read(repo.changelog.node(rev))
                miss = 0
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            if opts['branch']:
                br = repo.branchlookup([repo.changelog.node(rev)])

            show_changeset(du, repo, rev, brinfo=br)
            if opts['patch']:
                prev = (parents and parents[0]) or nullid
                dodiff(du, du, repo, prev, changenode, fns)
                du.write("\n\n")
        elif st == 'iter':
            # Flush the buffered hunks in display order.
            for args in du.hunk[rev]:
                ui.write(*args)
1547 1547
def manifest(ui, repo, rev=None):
    """output the latest or given revision of the project manifest

    Print a list of version controlled files for the given revision.

    The manifest is the list of files being version controlled. If no revision
    is given then the tip is used.
    """
    if rev:
        try:
            # assume all revision numbers are for changesets
            n = repo.changelog.read(repo.lookup(rev))[0]
        except hg.RepoError:
            # otherwise treat rev as a manifest revision
            n = repo.manifest.lookup(rev)
    else:
        n = repo.manifest.tip()
    m = repo.manifest.read(n)
    mf = repo.manifest.readflags(n)
    files = m.keys()
    files.sort()

    for f in files:
        # Show the file node, its mode (executable or not), and name.
        ui.write("%40s %3s %s\n" % (hex(m[f]), mf[f] and "755" or "644", f))
1573 1573
def outgoing(ui, repo, dest="default-push", **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repo or the
    default push repo. These are the changesets that would be pushed
    if a push was requested.
    """
    dest = ui.expandpath(dest, repo.root)
    other = hg.repository(ui, dest)
    # Expand the outgoing roots into the full list of changesets.
    nodes = repo.changelog.nodesbetween(repo.findoutgoing(other))[0]
    if opts['newest_first']:
        nodes.reverse()
    for n in nodes:
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        # --no-merges hides changesets with two parents.
        if opts['no_merges'] and len(parents) == 2:
            continue
        show_changeset(ui, repo, changenode=n)
        if opts['patch']:
            prev = (parents and parents[0]) or nullid
            dodiff(ui, ui, repo, prev, n)
            ui.write("\n")
1596 1596
def parents(ui, repo, rev=None):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions.
    """
    # With a revision, show that changeset's parents; otherwise show
    # the working directory's parents.
    if rev:
        nodes = repo.changelog.parents(repo.lookup(rev))
    else:
        nodes = repo.dirstate.parents()

    for n in nodes:
        if n != nullid:
            show_changeset(ui, repo, changenode=n)
1610 1610
def paths(ui, search=None):
    """show definition of symbolic path names

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    """
    # Opening the repository merges .hg/hgrc into the config; running
    # outside a repository is fine too.
    try:
        repo = hg.repository(ui=ui)
    except hg.RepoError:
        pass

    if search:
        # Look up a single name; report failure with exit code 1.
        for name, path in ui.configitems("paths"):
            if name == search:
                ui.write("%s\n" % path)
                return
        ui.warn(_("not found!\n"))
        return 1
    for name, path in ui.configitems("paths"):
        ui.write("%s = %s\n" % (name, path))
1635 1635
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Valid URLs are of the form:

      local/filesystem/path
      http://[user@]host[:port][/path]
      https://[user@]host[:port][/path]
      ssh://[user@]host[:port][/path]

    SSH requires an accessible shell account on the destination machine
    and a copy of hg in the remote path. With SSH, paths are relative
    to the remote user's home directory by default; use two slashes at
    the start of a path to specify it as relative to the filesystem root.
    """
    source = ui.expandpath(source, repo.root)
    ui.status(_('pulling from %s\n') % (source))

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, source)
    revs = None
    # -r requires a local source: remote repositories cannot be asked
    # to look up arbitrary revisions yet.
    if opts['rev'] and not other.local():
        # Message now marked for translation, consistent with every
        # other Abort message in this file.
        raise util.Abort(_("pull -r doesn't work for remote repositories yet"))
    elif opts['rev']:
        revs = [other.lookup(rev) for rev in opts['rev']]
    r = repo.pull(other, heads=revs)
    if not r:
        if opts['update']:
            return update(ui, repo)
        else:
            ui.status(_("(run 'hg update' to get a working copy)\n"))

    return r
1679 1679
def push(ui, repo, dest="default-push", force=False, ssh=None, remotecmd=None):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    client has forgotten to sync and merge before pushing.

    Valid URLs are of the form:

      local/filesystem/path
      ssh://[user@]host[:port][/path]

    SSH requires an accessible shell account on the destination
    machine and a copy of hg in the remote path.
    """
    dest = ui.expandpath(dest, repo.root)
    # Status message now marked for translation, matching pull's
    # _('pulling from %s\n').
    ui.status(_('pushing to %s\n') % (dest))

    if ssh:
        ui.setconfig("ui", "ssh", ssh)
    if remotecmd:
        ui.setconfig("ui", "remotecmd", remotecmd)

    other = hg.repository(ui, dest)
    r = repo.push(other, force)
    return r
1713 1713
def rawcommit(ui, repo, *flist, **rc):
    """raw commit interface

    Lowlevel commit, for use in helper scripts.

    This command is not intended to be used by normal users, as it is
    primarily useful for importing from other SCMs.
    """
    # The message may come from -m, from a log file, or not at all.
    message = rc['message']
    if not message and rc['logfile']:
        try:
            message = open(rc['logfile']).read()
        except IOError:
            pass
    if not message and not rc['logfile']:
        raise util.Abort(_("missing commit message"))

    # Files come from the command line plus an optional list file.
    files = relpath(repo, list(flist))
    if rc['files']:
        files += open(rc['files']).read().splitlines()

    # Resolve parent identifiers into changelog nodes.
    rc['parent'] = map(repo.lookup, rc['parent'])

    try:
        repo.rawcommit(files, message, rc['user'], rc['date'], *rc['parent'])
    except ValueError, inst:
        raise util.Abort(str(inst))
1741 1741
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # Only verify when something was actually recovered.
    if not repo.recover():
        return False
    return repo.verify()
1753 1753
def remove(ui, repo, pat, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This command schedules the files to be removed at the next commit.
    This only removes files from the current branch, not from the
    entire project history. If the files still exist in the working
    directory, they will be deleted from it.
    """
    names = []
    def okaytoremove(abs, rel, exact):
        # A file may only be removed when it is unmodified and tracked.
        c, a, d, u = repo.changes(files=[abs])
        reason = None
        if c:
            reason = _('is modified')
        elif a:
            reason = _('has been marked for add')
        elif u:
            reason = _('is not managed')
        if not reason:
            return True
        # Only complain about files the user named explicitly.
        if exact:
            ui.warn(_('not removing %s: file %s\n') % (rel, reason))
    for src, abs, rel, exact in walk(repo, (pat,) + pats, opts):
        if okaytoremove(abs, rel, exact):
            if ui.verbose or not exact:
                ui.status(_('removing %s\n') % rel)
            names.append(abs)
    repo.remove(names, unlink=True)
1780 1780
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record rename files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    # A rename is recorded as a copy followed by removal of the source.
    errs, copied = docopy(ui, repo, pats, opts)
    doomed = []
    for abs, rel, exact in copied:
        if ui.verbose or not exact:
            ui.status(_('removing %s\n') % rel)
        doomed.append(abs)
    repo.remove(doomed, unlink=True)
    return errs
1805 1805
def revert(ui, repo, *pats, **opts):
    """revert modified files or dirs back to their unmodified states

    Revert any uncommitted modifications made to the named files or
    directories. This restores the contents of the affected files to
    an unmodified state.

    If a file has been deleted, it is recreated. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.

    If no names are given, all files in the current directory and
    its subdirectories are reverted.
    """
    # Revert against -r REV if given, else the first dirstate parent.
    if opts['rev']:
        node = repo.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]

    files, choose, anypats = matchpats(repo, repo.getcwd(), pats, opts)
    c, a, d, u = repo.changes(match=choose)
    # Added files are simply forgotten; deleted ones are resurrected.
    repo.forget(a)
    repo.undelete(d)

    # update(node, allow=False, force=True, ...) restores file contents.
    return repo.update(node, False, True, choose, False)
1830 1830
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
1837 1837
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the "-A" and "-E" options to log to files.
    """

    if opts["stdio"]:
        # --stdio mode: serve the wire protocol over stdin/stdout for
        # remote clients instead of starting an HTTP server.  Redirect
        # normal output to stderr so it cannot corrupt the protocol
        # stream on stdout.
        fin, fout = sys.stdin, sys.stdout
        sys.stdout = sys.stderr

        # Prevent insertion/deletion of CRs
        util.set_binary(fin)
        util.set_binary(fout)

        def getarg():
            # Read one argument: a "name length" line followed by
            # exactly `length` bytes of value.
            argline = fin.readline()[:-1]
            arg, l = argline.split()
            val = fin.read(int(l))
            return arg, val
        def respond(v):
            # Responses are length-prefixed like arguments.
            fout.write("%d\n" % len(v))
            fout.write(v)
            fout.flush()

        # Repository lock held between "lock" and "unlock" commands.
        lock = None

        # Protocol loop: one command name per line; an empty line (EOF)
        # ends the session, so the HTTP code below is never reached in
        # --stdio mode.
        while 1:
            cmd = fin.readline()[:-1]
            if cmd == '':
                return
            if cmd == "heads":
                h = repo.heads()
                respond(" ".join(map(hex, h)) + "\n")
            if cmd == "lock":
                lock = repo.lock()
                respond("")
            if cmd == "unlock":
                if lock:
                    lock.release()
                lock = None
                respond("")
            elif cmd == "branches":
                arg, nodes = getarg()
                nodes = map(bin, nodes.split(" "))
                r = []
                for b in repo.branches(nodes):
                    r.append(" ".join(map(hex, b)) + "\n")
                respond("".join(r))
            elif cmd == "between":
                # Pairs come in as space-separated "top-bottom" ranges.
                arg, pairs = getarg()
                pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
                r = []
                for b in repo.between(pairs):
                    r.append(" ".join(map(hex, b)) + "\n")
                respond("".join(r))
            elif cmd == "changegroup":
                nodes = []
                arg, roots = getarg()
                nodes = map(bin, roots.split(" "))

                # Stream the changegroup in 4k chunks (no length prefix).
                cg = repo.changegroup(nodes)
                while 1:
                    d = cg.read(4096)
                    if not d:
                        break
                    fout.write(d)

                fout.flush()

            elif cmd == "addchangegroup":
                # Writing requires the client to hold the repo lock.
                if not lock:
                    respond("not locked")
                    continue
                respond("")

                r = repo.addchangegroup(fin)
                respond("")

    # HTTP mode: copy relevant command-line options into the [web]
    # configuration section so hgweb picks them up.
    optlist = "name templates style address port ipv6 accesslog errorlog"
    for o in optlist.split():
        if opts[o]:
            ui.setconfig("web", o, opts[o])

    try:
        httpd = hgweb.create_server(repo)
    except socket.error, inst:
        raise util.Abort('cannot start server: ' + inst.args[1])

    if ui.verbose:
        addr, port = httpd.socket.getsockname()
        if addr == '0.0.0.0':
            # Wildcard bind: show the host name instead of 0.0.0.0.
            addr = socket.gethostname()
        else:
            try:
                addr = socket.gethostbyaddr(addr)[0]
            except socket.error:
                pass
        if port != 80:
            ui.status(_('listening at http://%s:%d/\n') % (addr, port))
        else:
            ui.status(_('listening at http://%s/\n') % addr)
    httpd.serve_forever()
1943 1943
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show changed files in the working directory. If no names are
    given, all files are shown. Otherwise, only files matching the
    given names are shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    ? = not tracked
    """

    cwd = repo.getcwd()
    files, matchfn, anypats = matchpats(repo, cwd, pats, opts)
    (c, a, d, u) = [[util.pathto(cwd, x) for x in n]
                    for n in repo.changes(files=files, match=matchfn)]

    # Keys must be the untranslated option names: the previous code
    # used _('modified') etc. as keys into opts, which raises KeyError
    # as soon as a message translation is active.
    changetypes = [('modified', 'M', c),
                   ('added', 'A', a),
                   ('removed', 'R', d),
                   ('unknown', '?', u)]

    end = opts['print0'] and '\0' or '\n'

    # Show only the explicitly requested types, or all of them when no
    # type filter options were given.
    for opt, char, changes in ([ct for ct in changetypes if opts[ct[0]]]
                               or changetypes):
        if opts['no_status']:
            format = "%%s%s" % end
        else:
            format = "%s %%s%s" % (char, end)

        for f in changes:
            ui.write(format % f)
1979 1979
1980 1980 def tag(ui, repo, name, rev=None, **opts):
1981 1981 """add a tag for the current tip or a given revision
1982 1982
1983 1983 Name a particular revision using <name>.
1984 1984
1985 1985 Tags are used to name particular revisions of the repository and are
1986 1986 very useful to compare different revision, to go back to significant
1987 1987 earlier versions or to mark branch points as releases, etc.
1988 1988
1989 1989 If no revision is given, the tip is used.
1990 1990
1991 1991 To facilitate version control, distribution, and merging of tags,
1992 1992 they are stored as a file named ".hgtags" which is managed
1993 1993 similarly to other project files and can be hand-edited if
1994 1994 necessary.
1995 1995 """
1996 1996 if name == "tip":
1997 1997 raise util.Abort(_("the name 'tip' is reserved"))
1998 1998 if 'rev' in opts:
1999 1999 rev = opts['rev']
2000 2000 if rev:
2001 2001 r = hex(repo.lookup(rev))
2002 2002 else:
2003 2003 r = hex(repo.changelog.tip())
2004 2004
2005 2005 if name.find(revrangesep) >= 0:
2006 2006 raise util.Abort(_("'%s' cannot be used in a tag name") % revrangesep)
2007 2007
2008 2008 if opts['local']:
2009 2009 repo.opener("localtags", "a").write("%s %s\n" % (r, name))
2010 2010 return
2011 2011
2012 2012 (c, a, d, u) = repo.changes()
2013 2013 for x in (c, a, d, u):
2014 2014 if ".hgtags" in x:
2015 2015 raise util.Abort(_("working copy of .hgtags is changed "
2016 2016 "(please commit .hgtags manually)"))
2017 2017
2018 2018 repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
2019 2019 if repo.dirstate.state(".hgtags") == '?':
2020 2020 repo.add([".hgtags"])
2021 2021
2022 2022 message = (opts['message'] or
2023 2023 _("Added tag %s for changeset %s") % (name, r))
2024 2024 try:
2025 2025 repo.commit([".hgtags"], message, opts['user'], opts['date'])
2026 2026 except ValueError, inst:
2027 2027 raise util.Abort(str(inst))
2028 2028
def tags(ui, repo):
    """list repository tags

    List the repository tags.

    This lists both regular and local tags.
    """

    # tagslist() is ordered oldest-first; show newest first instead.
    taglist = repo.tagslist()
    taglist.reverse()
    for t, n in taglist:
        try:
            r = "%5d:%s" % (repo.changelog.rev(n), hex(n))
        except KeyError:
            # node is unknown to the changelog
            r = "    ?:?"
        ui.write("%-30s %s\n" % (t, r))
2045 2045
def tip(ui, repo):
    """show the tip revision

    Show the tip revision.
    """
    # Display the changelog tip with the standard changeset formatter.
    show_changeset(ui, repo, changenode=repo.changelog.tip())
2053 2053
def unbundle(ui, repo, fname):
    """apply a changegroup file

    Apply a compressed changegroup file generated by the bundle
    command.
    """
    f = urllib.urlopen(fname)

    # Bundles start with a 4-byte magic/version header.
    header = f.read(4)
    if header != "HG10":
        raise util.Abort(_("%s: not a Mercurial bundle file") % fname)

    def decompressed(chunks):
        # Stream-decompress the bz2 payload chunk by chunk.
        zd = bz2.BZ2Decompressor()
        for chunk in chunks:
            yield zd.decompress(chunk)

    gen = decompressed(util.filechunkiter(f, 4096))
    repo.addchangegroup(util.chunkbuffer(gen))
2072 2072
def undo(ui, repo):
    """undo the last commit or pull

    Roll back the last pull or commit transaction on the
    repository, restoring the project to its earlier state.

    This command should be used with care. There is only one level of
    undo and there is no redo.

    This command is not intended for use on public repositories. Once
    a change is visible for pull by other users, undoing it locally is
    ineffective.
    """
    # All the work happens in the repository object.
    repo.undo()
2087 2087
def update(ui, repo, node=None, merge=False, clean=False, branch=None):
    """update or merge working directory

    Update the working directory to the specified revision.

    If there are no outstanding changes in the working directory and
    there is a linear relationship between the current version and the
    requested version, the result is the requested version.

    Otherwise the result is a merge between the contents of the
    current working directory and the requested version. Files that
    changed between either parent are marked as changed for the next
    commit and a commit must be performed before any further updates
    are allowed.

    By default, update will refuse to run if doing so would require
    merging or discarding local changes.
    """
    if branch:
        # Collect the heads labelled with the requested branch name.
        br = repo.branchlookup(branch=branch)
        heads = [n for n in br if branch in br[n]]
        if len(heads) > 1:
            # Ambiguous: show the candidates and give up.
            ui.warn(_("Found multiple heads for %s\n") % branch)
            for n in heads:
                show_changeset(ui, repo, changenode=n, brinfo=br)
            return 1
        if not heads:
            ui.warn(_("branch %s not found\n") % branch)
            return 1
        node = heads[0]
        ui.warn(_("Using head %s for branch %s\n") % (short(node), branch))
    else:
        node = node and repo.lookup(node) or repo.changelog.tip()
    return repo.update(node, allow=merge, force=clean)
2126 2126
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # Delegates entirely to the repository object; its return value is
    # used as the command's exit status.
    return repo.verify()
2138 2138
2139 2139 # Command options and aliases are listed here, alphabetically
2140 2140
# Command table: maps "name|alias|..." (a leading ^ marks the command
# as shown in the short help list) to (function, options, usage).
# Resolved a stale diff artifact in the "heads" entry: the old and the
# corrected -r/--rev option lines were both present, which is a syntax
# error; only the corrected line (string default, fixed help text) is
# kept.
table = {
    "^add":
        (add,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         "hg add [OPTION]... [FILE]..."),
    "addremove":
        (addremove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         "hg addremove [OPTION]... [FILE]..."),
    "^annotate":
        (annotate,
         [('r', 'rev', '', _('annotate the specified revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('u', 'user', None, _('list the author')),
          ('d', 'date', None, _('list the date')),
          ('n', 'number', None, _('list the revision number (default)')),
          ('c', 'changeset', None, _('list the changeset')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg annotate [OPTION]... FILE...')),
    "bundle":
        (bundle,
         [],
         _('hg bundle FILE DEST')),
    "cat":
        (cat,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('o', 'output', "", _('print output to file with formatted name')),
          ('r', 'rev', '', _('print the given revision'))],
         _('hg cat [OPTION]... FILE...')),
    "^clone":
        (clone,
         [('U', 'noupdate', None, _('do not update the new working directory')),
          ('e', 'ssh', "", _('specify ssh command to use')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
          ('r', 'rev', [], _('a changeset you would like to have after cloning')),
          ('', 'remotecmd', "", _('specify hg command to run on the remote side'))],
         _('hg clone [OPTION]... SOURCE [DEST]')),
    "^commit|ci":
        (commit,
         [('A', 'addremove', None, _('run addremove during commit')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('m', 'message', "", _('use <text> as commit message')),
          ('l', 'logfile', "", _('read the commit message from <file>')),
          ('d', 'date', "", _('record datecode as commit date')),
          ('u', 'user', "", _('record user as commiter'))],
         _('hg commit [OPTION]... [FILE]...')),
    "copy|cp":
        (copy,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('A', 'after', None, _('record a copy that has already occurred')),
          ('f', 'force', None, _('forcibly copy over an existing managed file'))],
         _('hg copy [OPTION]... [SOURCE]... DEST')),
    "debugancestor": (debugancestor, [], _('debugancestor INDEX REV1 REV2')),
    "debugcheckstate": (debugcheckstate, [], _('debugcheckstate')),
    "debugconfig": (debugconfig, [], _('debugconfig')),
    "debugsetparents": (debugsetparents, [], _('debugsetparents REV1 [REV2]')),
    "debugstate": (debugstate, [], _('debugstate')),
    "debugdata": (debugdata, [], _('debugdata FILE REV')),
    "debugindex": (debugindex, [], _('debugindex FILE')),
    "debugindexdot": (debugindexdot, [], _('debugindexdot FILE')),
    "debugrename": (debugrename, [], _('debugrename FILE [REV]')),
    "debugwalk":
        (debugwalk,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('debugwalk [OPTION]... [FILE]...')),
    "^diff":
        (diff,
         [('r', 'rev', [], _('revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg diff [-a] [-I] [-X] [-r REV1 [-r REV2]] [FILE]...')),
    "^export":
        (export,
         [('o', 'output', "", _('print output to file with formatted name')),
          ('a', 'text', None, _('treat all files as text'))],
         "hg export [-a] [-o OUTFILE] REV..."),
    "forget":
        (forget,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         "hg forget [OPTION]... FILE..."),
    "grep":
        (grep,
         [('0', 'print0', None, _('end fields with NUL')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('', 'all', None, _('print all revisions that match')),
          ('i', 'ignore-case', None, _('ignore case when matching')),
          ('l', 'files-with-matches', None, _('print only filenames and revs that match')),
          ('n', 'line-number', None, _('print matching line numbers')),
          ('r', 'rev', [], _('search in given revision range')),
          ('u', 'user', None, _('print user who committed change'))],
         "hg grep [OPTION]... PATTERN [FILE]..."),
    "heads":
        (heads,
         [('b', 'branches', None, _('find branch info')),
          ('r', 'rev', "", _('show only heads which are descendants of rev'))],
         _('hg heads [-b] [-r <rev>]')),
    "help": (help_, [], _('hg help [COMMAND]')),
    "identify|id": (identify, [], _('hg identify')),
    "import|patch":
        (import_,
         [('p', 'strip', 1, _('directory strip option for patch. This has the same\n') +
                            _('meaning as the corresponding patch option')),
          ('f', 'force', None, _('skip check for outstanding uncommitted changes')),
          ('b', 'base', "", _('base path'))],
         "hg import [-f] [-p NUM] [-b BASE] PATCH..."),
    "incoming|in":
        (incoming,
         [('M', 'no-merges', None, _("do not show merges")),
          ('p', 'patch', None, _('show patch')),
          ('n', 'newest-first', None, _('show newest record first'))],
         _('hg incoming [-p] [-n] [-M] [SOURCE]')),
    "^init": (init, [], _('hg init [DEST]')),
    "locate":
        (locate,
         [('r', 'rev', '', _('search the repository as it stood at rev')),
          ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
          ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg locate [OPTION]... [PATTERN]...')),
    "^log|history":
        (log,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('b', 'branch', None, _('show branches')),
          ('k', 'keyword', [], _('search for a keyword')),
          ('r', 'rev', [], _('show the specified revision or range')),
          ('M', 'no-merges', None, _("do not show merges")),
          ('m', 'only-merges', None, _("show only merges")),
          ('p', 'patch', None, _('show patch'))],
         _('hg log [-I] [-X] [-r REV]... [-p] [FILE]')),
    "manifest": (manifest, [], _('hg manifest [REV]')),
    "outgoing|out":
        (outgoing,
         [('M', 'no-merges', None, _("do not show merges")),
          ('p', 'patch', None, _('show patch')),
          ('n', 'newest-first', None, _('show newest record first'))],
         _('hg outgoing [-p] [-n] [-M] [DEST]')),
    "^parents": (parents, [], _('hg parents [REV]')),
    "paths": (paths, [], _('hg paths [NAME]')),
    "^pull":
        (pull,
         [('u', 'update', None, _('update the working directory to tip after pull')),
          ('e', 'ssh', "", _('specify ssh command to use')),
          ('r', 'rev', [], _('a specific revision you would like to pull')),
          ('', 'remotecmd', "", _('specify hg command to run on the remote side'))],
         _('hg pull [-u] [-e FILE] [-r rev] [--remotecmd FILE] [SOURCE]')),
    "^push":
        (push,
         [('f', 'force', None, _('force push')),
          ('e', 'ssh', "", _('specify ssh command to use')),
          ('', 'remotecmd', "", _('specify hg command to run on the remote side'))],
         _('hg push [-f] [-e FILE] [--remotecmd FILE] [DEST]')),
    "rawcommit":
        (rawcommit,
         [('p', 'parent', [], _('parent')),
          ('d', 'date', "", _('date code')),
          ('u', 'user', "", _('user')),
          ('F', 'files', "", _('file list')),
          ('m', 'message', "", _('commit message')),
          ('l', 'logfile', "", _('commit message file'))],
         _('hg rawcommit [OPTION]... [FILE]...')),
    "recover": (recover, [], _("hg recover")),
    "^remove|rm":
        (remove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _("hg remove [OPTION]... FILE...")),
    "rename|mv":
        (rename,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('A', 'after', None, _('record a rename that has already occurred')),
          ('f', 'force', None, _('forcibly copy over an existing managed file'))],
         _('hg rename [OPTION]... [SOURCE]... DEST')),
    "^revert":
        (revert,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ("r", "rev", "", _("revision to revert to"))],
         _("hg revert [-n] [-r REV] [NAME]...")),
    "root": (root, [], _("hg root")),
    "^serve":
        (serve,
         [('A', 'accesslog', '', _('name of access log file to write to')),
          ('E', 'errorlog', '', _('name of error log file to write to')),
          ('p', 'port', 0, _('port to use (default: 8000)')),
          ('a', 'address', '', _('address to use')),
          ('n', 'name', "", _('name to show in web pages (default: working dir)')),
          ('', 'stdio', None, _('for remote clients')),
          ('t', 'templates', "", _('web templates to use')),
          ('', 'style', "", _('template style to use')),
          ('6', 'ipv6', None, _('use IPv6 in addition to IPv4'))],
         _("hg serve [OPTION]...")),
    "^status|st":
        (status,
         [('m', 'modified', None, _('show only modified files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files')),
          ('u', 'unknown', None, _('show only unknown (not tracked) files')),
          ('n', 'no-status', None, _('hide status prefix')),
          ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _("hg status [OPTION]... [FILE]...")),
    "tag":
        (tag,
         [('l', 'local', None, _('make the tag local')),
          ('m', 'message', "", _('message for tag commit log entry')),
          ('d', 'date', "", _('record datecode as commit date')),
          ('u', 'user', "", _('record user as commiter')),
          ('r', 'rev', "", _('revision to tag'))],
         _('hg tag [OPTION]... NAME [REV]')),
    "tags": (tags, [], _('hg tags')),
    "tip": (tip, [], _('hg tip')),
    "unbundle":
        (unbundle,
         [],
         _('hg unbundle FILE')),
    "undo": (undo, [], _('hg undo')),
    "^update|up|checkout|co":
        (update,
         [('b', 'branch', "", _('checkout the head of a specific branch')),
          ('m', 'merge', None, _('allow merging of branches')),
          ('C', 'clean', None, _('overwrite locally modified files'))],
         _('hg update [-b TAG] [-m] [-C] [REV]')),
    "verify": (verify, [], _('hg verify')),
    "version": (show_version, [], _('hg version')),
    }
2375 2375
# Options accepted by every command, in the same format as the option
# lists in `table`: (short name, long name, default, help text).
# parse() merges these into each command's own option list and then
# separates them back out.
globalopts = [
    ('R', 'repository', "", _("repository root directory")),
    ('', 'cwd', '', _("change working directory")),
    ('y', 'noninteractive', None, _("do not prompt, assume 'yes' for any required answers")),
    ('q', 'quiet', None, _("suppress output")),
    ('v', 'verbose', None, _("enable additional output")),
    ('', 'debug', None, _("enable debugging output")),
    ('', 'debugger', None, _("start debugger")),
    ('', 'traceback', None, _("print traceback on exception")),
    ('', 'time', None, _("time how long the command takes")),
    ('', 'profile', None, _("print command execution profile")),
    ('', 'version', None, _("output version information and exit")),
    ('h', 'help', None, _("display help and exit")),
    ]
2390 2390
# Space-separated list of commands that run without a local repository;
# dispatch() skips repository creation for these.
norepo = ("clone init version help debugancestor debugconfig debugdata"
          " debugindex debugindexdot paths")
2393 2393
def find(cmd):
    """Return (aliases, command table entry) for command string."""
    match = None
    for key in table.keys():
        aliases = key.lstrip("^").split("|")
        # An exact alias match wins immediately.
        if cmd in aliases:
            return aliases, table[key]
        # Otherwise remember a unique prefix match; a second prefix
        # match from another entry makes the abbreviation ambiguous.
        for alias in aliases:
            if alias.startswith(cmd):
                if match:
                    raise AmbiguousCommand(cmd)
                match = aliases, table[key]
                break
    if match:
        return match

    raise UnknownCommand(cmd)
2412 2412
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
    # Raised by catchterm(); dispatch() catches it and prints "killed!".
2415 2415
def catchterm(*args):
    """Signal handler: convert SIGTERM/SIGHUP into SignalInterrupt."""
    raise SignalInterrupt
2418 2418
def run():
    """Command-line entry point: dispatch sys.argv and exit with its status."""
    sys.exit(dispatch(sys.argv[1:]))
2421 2421
class ParseError(Exception):
    """Exception raised on errors in parsing the command line."""
    # args are (command name or None, error) -- see parse() and the
    # handler in dispatch().
2424 2424
def parse(ui, args):
    """Parse a command line into (cmd, fn, args, options, cmdoptions).

    Raises ParseError for bad options; find() may raise
    AmbiguousCommand or UnknownCommand for a bad command name.
    """
    options = {}      # global options
    cmdoptions = {}   # command-specific options

    try:
        # Peel off global options appearing before the command name.
        args = fancyopts.fancyopts(args, globalopts, options)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(None, inst)

    if args:
        cmd, args = args[0], args[1:]
        # Prepend per-command default arguments from the [defaults]
        # configuration section.
        defaults = ui.config("defaults", cmd)
        if defaults:
            args = defaults.split() + args

        aliases, i = find(cmd)
        # The canonical command name is the first alias.
        cmd = aliases[0]
        c = list(i[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        args = fancyopts.fancyopts(args, c, cmdoptions)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(cmd, inst)

    # separate global options back out
    for o in globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and i[0] or None, args, options, cmdoptions)
2463 2463
2464 2464 def dispatch(args):
2465 2465 signal.signal(signal.SIGTERM, catchterm)
2466 2466 try:
2467 2467 signal.signal(signal.SIGHUP, catchterm)
2468 2468 except AttributeError:
2469 2469 pass
2470 2470
2471 2471 try:
2472 2472 u = ui.ui()
2473 2473 except util.Abort, inst:
2474 2474 sys.stderr.write(_("abort: %s\n") % inst)
2475 2475 sys.exit(1)
2476 2476
2477 2477 external = []
2478 2478 for x in u.extensions():
2479 2479 def on_exception(exc, inst):
2480 2480 u.warn(_("*** failed to import extension %s\n") % x[1])
2481 2481 u.warn("%s\n" % inst)
2482 2482 if "--traceback" in sys.argv[1:]:
2483 2483 traceback.print_exc()
2484 2484 if x[1]:
2485 2485 try:
2486 2486 mod = imp.load_source(x[0], x[1])
2487 2487 except Exception, inst:
2488 2488 on_exception(Exception, inst)
2489 2489 continue
2490 2490 else:
2491 2491 def importh(name):
2492 2492 mod = __import__(name)
2493 2493 components = name.split('.')
2494 2494 for comp in components[1:]:
2495 2495 mod = getattr(mod, comp)
2496 2496 return mod
2497 2497 try:
2498 2498 mod = importh(x[0])
2499 2499 except Exception, inst:
2500 2500 on_exception(Exception, inst)
2501 2501 continue
2502 2502
2503 2503 external.append(mod)
2504 2504 for x in external:
2505 2505 cmdtable = getattr(x, 'cmdtable', {})
2506 2506 for t in cmdtable:
2507 2507 if t in table:
2508 2508 u.warn(_("module %s overrides %s\n") % (x.__name__, t))
2509 2509 table.update(cmdtable)
2510 2510
2511 2511 try:
2512 2512 cmd, func, args, options, cmdoptions = parse(u, args)
2513 2513 except ParseError, inst:
2514 2514 if inst.args[0]:
2515 2515 u.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
2516 2516 help_(u, inst.args[0])
2517 2517 else:
2518 2518 u.warn(_("hg: %s\n") % inst.args[1])
2519 2519 help_(u, 'shortlist')
2520 2520 sys.exit(-1)
2521 2521 except AmbiguousCommand, inst:
2522 2522 u.warn(_("hg: command '%s' is ambiguous.\n") % inst.args[0])
2523 2523 sys.exit(1)
2524 2524 except UnknownCommand, inst:
2525 2525 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
2526 2526 help_(u, 'shortlist')
2527 2527 sys.exit(1)
2528 2528
2529 2529 if options["time"]:
2530 2530 def get_times():
2531 2531 t = os.times()
2532 2532 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
2533 2533 t = (t[0], t[1], t[2], t[3], time.clock())
2534 2534 return t
2535 2535 s = get_times()
2536 2536 def print_time():
2537 2537 t = get_times()
2538 2538 u.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
2539 2539 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
2540 2540 atexit.register(print_time)
2541 2541
2542 2542 u.updateopts(options["verbose"], options["debug"], options["quiet"],
2543 2543 not options["noninteractive"])
2544 2544
2545 2545 # enter the debugger before command execution
2546 2546 if options['debugger']:
2547 2547 pdb.set_trace()
2548 2548
2549 2549 try:
2550 2550 try:
2551 2551 if options['help']:
2552 2552 help_(u, cmd, options['version'])
2553 2553 sys.exit(0)
2554 2554 elif options['version']:
2555 2555 show_version(u)
2556 2556 sys.exit(0)
2557 2557 elif not cmd:
2558 2558 help_(u, 'shortlist')
2559 2559 sys.exit(0)
2560 2560
2561 2561 if options['cwd']:
2562 2562 try:
2563 2563 os.chdir(options['cwd'])
2564 2564 except OSError, inst:
2565 2565 raise util.Abort('%s: %s' %
2566 2566 (options['cwd'], inst.strerror))
2567 2567
2568 2568 if cmd not in norepo.split():
2569 2569 path = options["repository"] or ""
2570 2570 repo = hg.repository(ui=u, path=path)
2571 2571 for x in external:
2572 2572 if hasattr(x, 'reposetup'): x.reposetup(u, repo)
2573 2573 d = lambda: func(u, repo, *args, **cmdoptions)
2574 2574 else:
2575 2575 d = lambda: func(u, *args, **cmdoptions)
2576 2576
2577 2577 if options['profile']:
2578 2578 import hotshot, hotshot.stats
2579 2579 prof = hotshot.Profile("hg.prof")
2580 2580 r = prof.runcall(d)
2581 2581 prof.close()
2582 2582 stats = hotshot.stats.load("hg.prof")
2583 2583 stats.strip_dirs()
2584 2584 stats.sort_stats('time', 'calls')
2585 2585 stats.print_stats(40)
2586 2586 return r
2587 2587 else:
2588 2588 return d()
2589 2589 except:
2590 2590 # enter the debugger when we hit an exception
2591 2591 if options['debugger']:
2592 2592 pdb.post_mortem(sys.exc_info()[2])
2593 2593 if options['traceback']:
2594 2594 traceback.print_exc()
2595 2595 raise
2596 2596 except hg.RepoError, inst:
2597 2597 u.warn(_("abort: "), inst, "!\n")
2598 2598 except revlog.RevlogError, inst:
2599 2599 u.warn(_("abort: "), inst, "!\n")
2600 2600 except SignalInterrupt:
2601 2601 u.warn(_("killed!\n"))
2602 2602 except KeyboardInterrupt:
2603 2603 try:
2604 2604 u.warn(_("interrupted!\n"))
2605 2605 except IOError, inst:
2606 2606 if inst.errno == errno.EPIPE:
2607 2607 if u.debugflag:
2608 2608 u.warn(_("\nbroken pipe\n"))
2609 2609 else:
2610 2610 raise
2611 2611 except IOError, inst:
2612 2612 if hasattr(inst, "code"):
2613 2613 u.warn(_("abort: %s\n") % inst)
2614 2614 elif hasattr(inst, "reason"):
2615 2615 u.warn(_("abort: error: %s\n") % inst.reason[1])
2616 2616 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
2617 2617 if u.debugflag:
2618 2618 u.warn(_("broken pipe\n"))
2619 2619 elif getattr(inst, "strerror", None):
2620 2620 if getattr(inst, "filename", None):
2621 2621 u.warn(_("abort: %s - %s\n") % (inst.strerror, inst.filename))
2622 2622 else:
2623 2623 u.warn(_("abort: %s\n") % inst.strerror)
2624 2624 else:
2625 2625 raise
2626 2626 except OSError, inst:
2627 2627 if hasattr(inst, "filename"):
2628 2628 u.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
2629 2629 else:
2630 2630 u.warn(_("abort: %s\n") % inst.strerror)
2631 2631 except util.Abort, inst:
2632 2632 u.warn(_('abort: '), inst.args[0] % inst.args[1:], '\n')
2633 2633 sys.exit(1)
2634 2634 except TypeError, inst:
2635 2635 # was this an argument error?
2636 2636 tb = traceback.extract_tb(sys.exc_info()[2])
2637 2637 if len(tb) > 2: # no
2638 2638 raise
2639 2639 u.debug(inst, "\n")
2640 2640 u.warn(_("%s: invalid arguments\n") % cmd)
2641 2641 help_(u, cmd)
2642 2642 except AmbiguousCommand, inst:
2643 2643 u.warn(_("hg: command '%s' is ambiguous.\n") % inst.args[0])
2644 2644 help_(u, 'shortlist')
2645 2645 except UnknownCommand, inst:
2646 2646 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
2647 2647 help_(u, 'shortlist')
2648 2648 except SystemExit:
2649 2649 # don't catch this in the catch-all below
2650 2650 raise
2651 2651 except:
2652 2652 u.warn(_("** unknown exception encountered, details follow\n"))
2653 2653 u.warn(_("** report bug details to mercurial@selenic.com\n"))
2654 2654 raise
2655 2655
2656 2656 sys.exit(-1)
@@ -1,1780 +1,1780 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, create) the repository at path.

        With no path, search upward from the current directory for a
        ".hg" directory.  Raises repo.RepoError when no repository is
        found, or when path exists but holds no repository.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                # reached the filesystem root without finding .hg
                if p == oldp: raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui
        self.opener = util.opener(self.path)   # opens files under .hg
        self.wopener = util.opener(self.root)  # opens working-dir files
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-filled caches; see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None
        # lazily-compiled [encode]/[decode] filters; see wread()/wwrite()
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"))
        except IOError: pass
48 48
    def hook(self, name, **args):
        """Run every configured [hooks] entry whose name (before any
        "." suffix) matches name.

        Each keyword argument is exported, upper-cased, into the hook's
        environment and restored afterwards.  Returns True only if all
        matching hooks exited with status 0.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            old = {}
            # export the hook arguments, remembering prior values
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(cmd)
            os.chdir(olddir)

            # restore the environment exactly as it was
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                             (name, r))
                return False
            return True

        r = True
        for hname, cmd in self.ui.configitems("hooks"):
            s = hname.split(".")
            if s[0] == name and cmd:
                r = runhook(hname, cmd) and r
        return r
82 82
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # malformed node hash: record an empty binary node
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                pass

            # repo-local (uncommitted) tags override .hgtags entries
            try:
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # the 'tip' pseudo-tag always points at the newest changeset
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
120 120
121 121 def tagslist(self):
122 122 '''return a list of tags ordered by revision'''
123 123 l = []
124 124 for t, n in self.tags().items():
125 125 try:
126 126 r = self.changelog.rev(n)
127 127 except:
128 128 r = -2 # sort to the beginning of the list if unknown
129 129 l.append((r,t,n))
130 130 l.sort()
131 131 return [(t,n) for r,t,n in l]
132 132
133 133 def nodetags(self, node):
134 134 '''return the tags associated with a node'''
135 135 if not self.nodetagscache:
136 136 self.nodetagscache = {}
137 137 for t,n in self.tags().items():
138 138 self.nodetagscache.setdefault(n,[]).append(t)
139 139 return self.nodetagscache.get(node, [])
140 140
141 141 def lookup(self, key):
142 142 try:
143 143 return self.tags()[key]
144 144 except KeyError:
145 145 try:
146 146 return self.changelog.lookup(key)
147 147 except:
148 148 raise repo.RepoError(_("unknown revision '%s'") % key)
149 149
150 150 def dev(self):
151 151 return os.stat(self.path).st_dev
152 152
153 153 def local(self):
154 154 return True
155 155
156 156 def join(self, f):
157 157 return os.path.join(self.path, f)
158 158
159 159 def wjoin(self, f):
160 160 return os.path.join(self.root, f)
161 161
162 162 def file(self, f):
163 163 if f[0] == '/': f = f[1:]
164 164 return filelog.filelog(self.opener, f)
165 165
166 166 def getcwd(self):
167 167 return self.dirstate.getcwd()
168 168
169 169 def wfile(self, f, mode='r'):
170 170 return self.wopener(f, mode)
171 171
172 172 def wread(self, filename):
173 173 if self.encodepats == None:
174 174 l = []
175 175 for pat, cmd in self.ui.configitems("encode"):
176 176 mf = util.matcher("", "/", [pat], [], [])[1]
177 177 l.append((mf, cmd))
178 178 self.encodepats = l
179 179
180 180 data = self.wopener(filename, 'r').read()
181 181
182 182 for mf, cmd in self.encodepats:
183 183 if mf(filename):
184 184 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
185 185 data = util.filter(data, cmd)
186 186 break
187 187
188 188 return data
189 189
190 190 def wwrite(self, filename, data, fd=None):
191 191 if self.decodepats == None:
192 192 l = []
193 193 for pat, cmd in self.ui.configitems("decode"):
194 194 mf = util.matcher("", "/", [pat], [], [])[1]
195 195 l.append((mf, cmd))
196 196 self.decodepats = l
197 197
198 198 for mf, cmd in self.decodepats:
199 199 if mf(filename):
200 200 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
201 201 data = util.filter(data, cmd)
202 202 break
203 203
204 204 if fd:
205 205 return fd.write(data)
206 206 return self.wopener(filename, 'w').write(data)
207 207
    def transaction(self):
        """Open a new transaction.

        The current dirstate is journalled first so undo() can restore
        it; on successful completion the journal files are renamed to
        undo files by the after() callback.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the finished journal around as undo information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
223 223
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when a rollback was performed, False otherwise.
        """
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
233 233
    def undo(self):
        """Roll back the last completed transaction and restore the
        dirstate that was saved alongside it."""
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # reload in-memory dirstate from the restored file
            self.dirstate.read()
        else:
            self.ui.warn(_("no undo information available\n"))
244 244
    def lock(self, wait=1):
        """Acquire the repository (store) lock.

        Tries a non-blocking acquire first; if the lock is held and
        wait is true, warns and then blocks until it is available.
        """
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
253 253
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        The dirstate is written back when the lock is released.  After
        waiting for another holder, the dirstate is re-read since it
        may have changed on disk in the meantime.
        """
        try:
            wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
            # the previous holder may have changed the dirstate
            self.dirstate.read()
        return wlock
264 264
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files against explicit parents, bypassing
        the usual working-directory bookkeeping.

        Parents default to the current dirstate parents; the dirstate
        itself is only updated when p1 is the current first parent.
        Files that cannot be read are treated as removed.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only track the commit in the dirstate when committing on top
        # of the working directory's parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable working copy: drop the file from the commit
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
336 336
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit changes in the working directory.

        With an explicit file list, only those files are considered;
        otherwise everything reported changed by changes() is used.
        Returns the new changeset node, or None when there is nothing
        to commit, a hook vetoes, or the user supplies an empty commit
        message.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        if not self.hook("precommit"):
            return None

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and revision in the filelog metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: build a template and invoke the editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
462 462
463 463 def walk(self, node=None, files=[], match=util.always):
464 464 if node:
465 465 for fn in self.manifest.read(self.changelog.read(node)[0]):
466 466 if match(fn): yield 'm', fn
467 467 else:
468 468 for src, fn in self.dirstate.walk(files, match):
469 469 yield src, fn
470 470
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Compare two revisions, or a revision against the working dir.

        Returns sorted lists (changed, added, deleted, unknown);
        'unknown' is only populated when the working directory is one
        side of the comparison.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working copy of fn against its revision in mf
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            try:
                wlock = self.wlock(wait=0)
            except lock.LockHeld:
                # cannot update the dirstate, but can still compare
                wlock = None
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)
                        elif wlock is not None:
                            # unchanged: refresh dirstate stat info while
                            # we hold the wlock
                            self.dirstate.update([f], "n")

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # "" marks a working-dir file: compare contents
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 exists only on the node1 side
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
547 547
548 548 def add(self, list):
549 549 wlock = self.wlock()
550 550 for f in list:
551 551 p = self.wjoin(f)
552 552 if not os.path.exists(p):
553 553 self.ui.warn(_("%s does not exist!\n") % f)
554 554 elif not os.path.isfile(p):
555 555 self.ui.warn(_("%s not added: only files supported currently\n") % f)
556 556 elif self.dirstate.state(f) in 'an':
557 557 self.ui.warn(_("%s already tracked!\n") % f)
558 558 else:
559 559 self.dirstate.update([f], "a")
560 560
561 561 def forget(self, list):
562 562 wlock = self.wlock()
563 563 for f in list:
564 564 if self.dirstate.state(f) not in 'ai':
565 565 self.ui.warn(_("%s not added!\n") % f)
566 566 else:
567 567 self.dirstate.forget([f])
568 568
    def remove(self, list, unlink=False):
        """Schedule files for removal from the repository.

        With unlink=True the working copies are deleted first (a file
        that is already gone is not an error).  Files that still exist,
        were never committed, or are untracked are skipped with a
        warning.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT: raise
        wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: dropping the pending add is enough
                self.ui.warn(_("%s never committed!\n") % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
588 588
589 589 def undelete(self, list):
590 590 p = self.dirstate.parents()[0]
591 591 mn = self.changelog.read(p)[0]
592 592 mf = self.manifest.readflags(mn)
593 593 m = self.manifest.read(mn)
594 594 wlock = self.wlock()
595 595 for f in list:
596 596 if self.dirstate.state(f) not in "r":
597 597 self.ui.warn("%s not removed!\n" % f)
598 598 else:
599 599 t = self.file(f).read(m[f])
600 600 self.wwrite(f, t)
601 601 util.set_exec(self.wjoin(f), mf[f])
602 602 self.dirstate.update([f], "n")
603 603
604 604 def copy(self, source, dest):
605 605 p = self.wjoin(dest)
606 606 if not os.path.exists(p):
607 607 self.ui.warn(_("%s does not exist!\n") % dest)
608 608 elif not os.path.isfile(p):
609 609 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
610 610 else:
611 611 wlock = self.wlock()
612 612 if self.dirstate.state(dest) == '?':
613 613 self.dirstate.update([dest], "a")
614 614 self.dirstate.copy(source, dest)
615 615
616 def heads(self, start=nullid):
616 def heads(self, start=None):
617 617 heads = self.changelog.heads(start)
618 618 # sort the output in rev descending order
619 619 heads = [(-self.changelog.rev(h), h) for h in heads]
620 620 heads.sort()
621 621 return [n for (r, n) in heads]
622 622
623 623 # branchlookup returns a dict giving a list of branches for
624 624 # each head. A branch is defined as the tag of a node or
625 625 # the branch of the node's parents. If a node has multiple
626 626 # branch tags, tags are eliminated if they are visible from other
627 627 # branch tags.
628 628 #
629 629 # So, for this graph: a->b->c->d->e
630 630 # \ /
631 631 # aa -----/
632 632 # a has tag 2.6.12
633 633 # d has tag 2.6.13
634 634 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
635 635 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
636 636 # from the list.
637 637 #
638 638 # It is possible that more than one head will have the same branch tag.
639 639 # callers need to check the result for multiple heads under the same
640 640 # branch tag if that is a problem for them (ie checkout of a specific
641 641 # branch).
642 642 #
643 643 # passing in a specific branch will limit the depth of the search
644 644 # through the parents. It won't limit the branches returned in the
645 645 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its visible branch tags.

        See the block comment above for the full semantics; passing a
        specific branch limits the depth of the parent traversal.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # tag node -> {tag nodes visible from it}
        merges = []         # pending (second-parent, found-so-far) work
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue merge parents separately so each gets a full walk
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tag nodes reachable from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
728 728
729 729 def branches(self, nodes):
730 730 if not nodes: nodes = [self.changelog.tip()]
731 731 b = []
732 732 for n in nodes:
733 733 t = n
734 734 while n:
735 735 p = self.changelog.parents(n)
736 736 if p[1] != nullid or p[0] == nullid:
737 737 b.append((t, n, p[0], p[1]))
738 738 break
739 739 n = p[0]
740 740 return b
741 741
742 742 def between(self, pairs):
743 743 r = []
744 744
745 745 for top, bottom in pairs:
746 746 n, l, i = top, [], 0
747 747 f = 1
748 748
749 749 while n != bottom:
750 750 p = self.changelog.parents(n)[0]
751 751 if i == f:
752 752 l.append(n)
753 753 f = f * 2
754 754 n = p
755 755 i += 1
756 756
757 757 r.append(l)
758 758
759 759 return r
760 760
    def findincoming(self, remote, base=None, heads=None):
        """Return the roots of changesets present in remote but not
        here, or None when there is nothing to fetch.

        base, when supplied as a dict, is filled in with known-common
        nodes as a side effect; heads limits the search to the given
        remote heads.
        """
        m = self.changelog.nodemap
        search = []         # incomplete branches to binary-search
        fetch = {}          # earliest unknown nodes (the result)
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n") % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue both parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask the remote about parents in batches of ten
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n") % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short(f[:4]) truncates the node before
                # shortening -- looks like it should be short(f); confirm
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
880 880
    def findoutgoing(self, remote, base=None, heads=None):
        """Return the roots of the local changesets missing from remote.

        When base is not supplied, findincoming is run first to compute
        the set of common nodes; a precomputed base dict skips that.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
910 910
    def pull(self, remote, heads = None):
        """Fetch missing changesets from remote and add them locally.

        Returns 1 when there is nothing to pull, otherwise the result
        of addchangegroup.  heads limits the pull to the ancestors of
        the given remote heads.
        """
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        if heads is None:
            cg = remote.changegroup(fetch)
        else:
            cg = remote.changegroupsubset(fetch, heads)
        return self.addchangegroup(cg)
930 930
    def push(self, remote, force=False):
        """push local changesets to the remote repository

        Refuses to push when the remote has changes we have not pulled
        (unsynced) or when the push would create new remote heads,
        unless force is set.  Returns 1 on refusal or when there is
        nothing to push.
        """
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # pushing more heads than the remote already has would create
            # new remote branches
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
955 955
    def changegroupsubset(self, bases, heads):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the group generator."""

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1217 1217
    def changegroup(self, basenodes):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the group generator."""
        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changeset is its own changelog link
        def identity(x):
            return x

        # yield the nodes of revlog whose linkrev falls in the outgoing set
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # collect, per outgoing changeset, the set of files it touched
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to the changeset node that owns it
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # an empty chunk signals the end of the stream
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1277 1277
1278 1278 def addchangegroup(self, source):
1279 1279
1280 1280 def getchunk():
1281 1281 d = source.read(4)
1282 1282 if not d: return ""
1283 1283 l = struct.unpack(">l", d)[0]
1284 1284 if l <= 4: return ""
1285 1285 d = source.read(l - 4)
1286 1286 if len(d) < l - 4:
1287 1287 raise repo.RepoError(_("premature EOF reading chunk"
1288 1288 " (got %d bytes, expected %d)")
1289 1289 % (len(d), l - 4))
1290 1290 return d
1291 1291
1292 1292 def getgroup():
1293 1293 while 1:
1294 1294 c = getchunk()
1295 1295 if not c: break
1296 1296 yield c
1297 1297
1298 1298 def csmap(x):
1299 1299 self.ui.debug(_("add changeset %s\n") % short(x))
1300 1300 return self.changelog.count()
1301 1301
1302 1302 def revmap(x):
1303 1303 return self.changelog.rev(x)
1304 1304
1305 1305 if not source: return
1306 1306 changesets = files = revisions = 0
1307 1307
1308 1308 tr = self.transaction()
1309 1309
1310 1310 oldheads = len(self.changelog.heads())
1311 1311
1312 1312 # pull off the changeset group
1313 1313 self.ui.status(_("adding changesets\n"))
1314 1314 co = self.changelog.tip()
1315 1315 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1316 1316 cnr, cor = map(self.changelog.rev, (cn, co))
1317 1317 if cn == nullid:
1318 1318 cnr = cor
1319 1319 changesets = cnr - cor
1320 1320
1321 1321 # pull off the manifest group
1322 1322 self.ui.status(_("adding manifests\n"))
1323 1323 mm = self.manifest.tip()
1324 1324 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1325 1325
1326 1326 # process the files
1327 1327 self.ui.status(_("adding file changes\n"))
1328 1328 while 1:
1329 1329 f = getchunk()
1330 1330 if not f: break
1331 1331 self.ui.debug(_("adding %s revisions\n") % f)
1332 1332 fl = self.file(f)
1333 1333 o = fl.count()
1334 1334 n = fl.addgroup(getgroup(), revmap, tr)
1335 1335 revisions += fl.count() - o
1336 1336 files += 1
1337 1337
1338 1338 newheads = len(self.changelog.heads())
1339 1339 heads = ""
1340 1340 if oldheads and newheads > oldheads:
1341 1341 heads = _(" (+%d heads)") % (newheads - oldheads)
1342 1342
1343 1343 self.ui.status(_("added %d changesets"
1344 1344 " with %d changes to %d files%s\n")
1345 1345 % (changesets, revisions, files, heads))
1346 1346
1347 1347 tr.close()
1348 1348
1349 1349 if changesets > 0:
1350 1350 if not self.hook("changegroup",
1351 1351 node=hex(self.changelog.node(cor+1))):
1352 1352 self.ui.warn(_("abort: changegroup hook returned failure!\n"))
1353 1353 return 1
1354 1354
1355 1355 for i in range(cor + 1, cnr + 1):
1356 1356 self.hook("commit", node=hex(self.changelog.node(i)))
1357 1357
1358 1358 return
1359 1359
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """update the working directory to the given changeset node

        allow permits an update that merges across branches; force
        discards/overrides local changes and checks; choose, if given,
        is a predicate selecting which files to consider; with
        moddirstate=False the dirstate is left untouched.  Returns 1 on
        failure (uncommitted merge outstanding, or a disallowed
        cross-branch update).
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate:
            wlock = self.wlock()

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n") % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"): get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
1595 1595
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory

        fn is the working-directory name of the file; my and other are
        the two filelog nodes to merge.  The common ancestor is taken
        from the filelog, and an external merge tool (HGMERGE, the ui
        "merge" config entry, or "hgmerge") is run on the three versions.
        """

        def temp(prefix, node):
            # write the given filelog revision to a temp file and return
            # its name, for handing to the external merge tool
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note(_("resolving %s\n") % fn)
        self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
        if r:
            self.ui.warn(_("merging %s failed!\n") % fn)

        os.unlink(b)
        os.unlink(c)
1625 1625
1626 1626 def verify(self):
1627 1627 filelinkrevs = {}
1628 1628 filenodes = {}
1629 1629 changesets = revisions = files = 0
1630 1630 errors = [0]
1631 1631 neededmanifests = {}
1632 1632
1633 1633 def err(msg):
1634 1634 self.ui.warn(msg + "\n")
1635 1635 errors[0] += 1
1636 1636
1637 1637 seen = {}
1638 1638 self.ui.status(_("checking changesets\n"))
1639 1639 d = self.changelog.checksize()
1640 1640 if d:
1641 1641 err(_("changeset data short %d bytes") % d)
1642 1642 for i in range(self.changelog.count()):
1643 1643 changesets += 1
1644 1644 n = self.changelog.node(i)
1645 1645 l = self.changelog.linkrev(n)
1646 1646 if l != i:
1647 1647 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1648 1648 if n in seen:
1649 1649 err(_("duplicate changeset at revision %d") % i)
1650 1650 seen[n] = 1
1651 1651
1652 1652 for p in self.changelog.parents(n):
1653 1653 if p not in self.changelog.nodemap:
1654 1654 err(_("changeset %s has unknown parent %s") %
1655 1655 (short(n), short(p)))
1656 1656 try:
1657 1657 changes = self.changelog.read(n)
1658 1658 except KeyboardInterrupt:
1659 1659 self.ui.warn(_("interrupted"))
1660 1660 raise
1661 1661 except Exception, inst:
1662 1662 err(_("unpacking changeset %s: %s") % (short(n), inst))
1663 1663
1664 1664 neededmanifests[changes[0]] = n
1665 1665
1666 1666 for f in changes[3]:
1667 1667 filelinkrevs.setdefault(f, []).append(i)
1668 1668
1669 1669 seen = {}
1670 1670 self.ui.status(_("checking manifests\n"))
1671 1671 d = self.manifest.checksize()
1672 1672 if d:
1673 1673 err(_("manifest data short %d bytes") % d)
1674 1674 for i in range(self.manifest.count()):
1675 1675 n = self.manifest.node(i)
1676 1676 l = self.manifest.linkrev(n)
1677 1677
1678 1678 if l < 0 or l >= self.changelog.count():
1679 1679 err(_("bad manifest link (%d) at revision %d") % (l, i))
1680 1680
1681 1681 if n in neededmanifests:
1682 1682 del neededmanifests[n]
1683 1683
1684 1684 if n in seen:
1685 1685 err(_("duplicate manifest at revision %d") % i)
1686 1686
1687 1687 seen[n] = 1
1688 1688
1689 1689 for p in self.manifest.parents(n):
1690 1690 if p not in self.manifest.nodemap:
1691 1691 err(_("manifest %s has unknown parent %s") %
1692 1692 (short(n), short(p)))
1693 1693
1694 1694 try:
1695 1695 delta = mdiff.patchtext(self.manifest.delta(n))
1696 1696 except KeyboardInterrupt:
1697 1697 self.ui.warn(_("interrupted"))
1698 1698 raise
1699 1699 except Exception, inst:
1700 1700 err(_("unpacking manifest %s: %s") % (short(n), inst))
1701 1701
1702 1702 ff = [ l.split('\0') for l in delta.splitlines() ]
1703 1703 for f, fn in ff:
1704 1704 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1705 1705
1706 1706 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1707 1707
1708 1708 for m,c in neededmanifests.items():
1709 1709 err(_("Changeset %s refers to unknown manifest %s") %
1710 1710 (short(m), short(c)))
1711 1711 del neededmanifests
1712 1712
1713 1713 for f in filenodes:
1714 1714 if f not in filelinkrevs:
1715 1715 err(_("file %s in manifest but not in changesets") % f)
1716 1716
1717 1717 for f in filelinkrevs:
1718 1718 if f not in filenodes:
1719 1719 err(_("file %s in changeset but not in manifest") % f)
1720 1720
1721 1721 self.ui.status(_("checking files\n"))
1722 1722 ff = filenodes.keys()
1723 1723 ff.sort()
1724 1724 for f in ff:
1725 1725 if f == "/dev/null": continue
1726 1726 files += 1
1727 1727 fl = self.file(f)
1728 1728 d = fl.checksize()
1729 1729 if d:
1730 1730 err(_("%s file data short %d bytes") % (f, d))
1731 1731
1732 1732 nodes = { nullid: 1 }
1733 1733 seen = {}
1734 1734 for i in range(fl.count()):
1735 1735 revisions += 1
1736 1736 n = fl.node(i)
1737 1737
1738 1738 if n in seen:
1739 1739 err(_("%s: duplicate revision %d") % (f, i))
1740 1740 if n not in filenodes[f]:
1741 1741 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1742 1742 else:
1743 1743 del filenodes[f][n]
1744 1744
1745 1745 flr = fl.linkrev(n)
1746 1746 if flr not in filelinkrevs[f]:
1747 1747 err(_("%s:%s points to unexpected changeset %d")
1748 1748 % (f, short(n), flr))
1749 1749 else:
1750 1750 filelinkrevs[f].remove(flr)
1751 1751
1752 1752 # verify contents
1753 1753 try:
1754 1754 t = fl.read(n)
1755 1755 except KeyboardInterrupt:
1756 1756 self.ui.warn(_("interrupted"))
1757 1757 raise
1758 1758 except Exception, inst:
1759 1759 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1760 1760
1761 1761 # verify parents
1762 1762 (p1, p2) = fl.parents(n)
1763 1763 if p1 not in nodes:
1764 1764 err(_("file %s:%s unknown parent 1 %s") %
1765 1765 (f, short(n), short(p1)))
1766 1766 if p2 not in nodes:
1767 1767 err(_("file %s:%s unknown parent 2 %s") %
1768 1768 (f, short(n), short(p1)))
1769 1769 nodes[n] = 1
1770 1770
1771 1771 # cross-check
1772 1772 for node in filenodes[f]:
1773 1773 err(_("node %s in manifests not in %s") % (hex(node), f))
1774 1774
1775 1775 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1776 1776 (files, changesets, revisions))
1777 1777
1778 1778 if errors[0]:
1779 1779 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1780 1780 return 1
@@ -1,888 +1,893 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
17 17
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # order the parents canonically so the hash does not depend on
    # which parent came first
    a, b = min(p1, p2), max(p1, p2)
    digest = sha.new(a)
    digest.update(b)
    digest.update(text)
    return digest.digest()
31 31
def compress(text):
    """ generate a possibly-compressed representation of text """
    # nothing to store for empty input
    if not text:
        return ("", text)
    if len(text) < 44:
        # too short for zlib to pay off: store literally; the 'u' tag
        # marks uncompressed data unless text already begins with the
        # plain-data marker byte
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    comp = zlib.compress(text)
    if len(comp) > len(text):
        # compression made it bigger; store a literal copy instead
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", comp)
43 43
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    # the first byte tags how the data was stored
    marker = bin[0]
    if marker == '\0':
        return bin          # plain data, stored as-is
    if marker == 'u':
        return bin[1:]      # literal copy behind a one-byte tag
    if marker == 'x':
        return zlib.decompress(bin)
    raise RevlogError(_("unknown compression type %s") % marker)
52 52
53 53 indexformat = ">4l20s20s20s"
54 54
class lazyparser:
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data                        # raw index file contents
        self.s = struct.calcsize(indexformat)   # bytes per index record
        self.l = len(data)/self.s               # number of records
        self.index = [None] * self.l            # parsed entries, filled lazily
        self.map = {nullid: -1}                 # nodeid -> rev, filled lazily
        self.all = 0                            # nonzero once fully parsed
        self.revlog = revlog

    def trunc(self, pos):
        # forget every record at or beyond byte offset pos
        self.l = pos/self.s

    def load(self, pos=None):
        # parse the 1000-entry block containing record pos, or
        # everything when pos is None
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            # fully parsed: install the plain list/dict on the revlog so
            # later accesses bypass the lazy wrappers entirely
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i      # e[6] is the record's nodeid
            i += 1
96 96
class lazyindex:
    """a lazy version of the index array

    Wraps a lazyparser and parses index blocks on first access.
    """
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        # normalize negative indices, then parse the block holding pos
        if pos < 0:
            pos += len(self.p.index)
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        # unparsed slots hold None, so fall through to load() on a miss
        return self.p.index[pos] or self.load(pos)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def append(self, e):
        self.p.index.append(e)
    def trunc(self, pos):
        self.p.trunc(pos)
116 116
class lazymap:
    """a lazy version of the node map

    Maps nodeids to revision numbers, parsing index blocks on demand.
    """
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # locate the record containing key by searching the raw index
        # data, then parse only the block around that position
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        # a reliable containment answer needs the whole map
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                # slot not parsed yet: load its block and retry
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            # miss may just mean "not parsed yet"; try loading it
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
152 152
153 153 class RevlogError(Exception): pass
154 154
155 155 class revlog:
156 156 """
157 157 the underlying revision storage object
158 158
159 159 A revlog consists of two parts, an index and the revision data.
160 160
161 161 The index is a file with a fixed record size containing
162 162 information on each revision, includings its nodeid (hash), the
163 163 nodeids of its parents, the position and offset of its data within
164 164 the data file, and the revision it's based on. Finally, each entry
165 165 contains a linkrev entry that can serve as a pointer to external
166 166 data.
167 167
168 168 The revision data itself is a linear collection of data chunks.
169 169 Each chunk represents a revision and is usually represented as a
170 170 delta against the previous chunk. To bound lookup time, runs of
171 171 deltas are limited to about 2 times the length of the original
172 172 version data. This makes retrieval of a version proportional to
173 173 its size, or O(1) relative to the number of revisions.
174 174
175 175 Both pieces of the revlog are written to in an append-only
176 176 fashion, which means we never need to rewrite a file to insert or
177 177 remove data, and can use some simple techniques to avoid the need
178 178 for locking while reading.
179 179 """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None       # (node, rev, text) of last revision read

        try:
            i = self.opener(self.indexfile).read()
        except IOError, inst:
            # a missing index file just means an empty revlog
            if inst.errno != errno.ENOENT:
                raise
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            # small index: parse all records up front
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1
220 220
    # index entry layout: (offset, length, base, linkrev, p1, p2, nodeid)
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        # map a nodeid back to its revision number
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][3]
    def parents(self, node):
        # the null revision is its own parent
        if node == nullid: return (nullid, nullid)
        return self.index[self.rev(node)][4:6]

    # start/length/end give a revision's byte span in the data file;
    # base is the revision its stored delta chain begins at
    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]
238 238
    def reachable(self, rev, stop=None):
        """return a dict (used as a set) of all ancestors of rev,
        without walking past stop

        NOTE(review): despite the parameter names, both 'rev' and
        'stop' are nodeids here, not revision numbers.
        """
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        # breadth-first walk towards the root
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                # never descend below the stop revision
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable
260 260
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones have
        been pruned.  Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads[:]
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter.
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
411 411
412 def heads(self, start=nullid):
412 def heads(self, start=None):
413 413 """return the list of all nodes that have no children
414 if start is specified, only heads that are children of
415 start will be returned"""
414
415 if start is specified, only heads that are descendants of
416 start will be returned
417
418 """
419 if start is None:
420 start = nullid
416 421 reachable = {start: 1}
417 422 heads = {start: 1}
418 423 startrev = self.rev(start)
419 424
420 425 for r in xrange(startrev + 1, self.count()):
421 426 n = self.node(r)
422 427 for pn in self.parents(n):
423 428 if pn in reachable:
424 429 reachable[n] = 1
425 430 heads[n] = 1
426 431 if pn in heads:
427 432 del heads[pn]
428 433 return heads.keys()
429 434
430 435 def children(self, node):
431 436 """find the children of a given node"""
432 437 c = []
433 438 p = self.rev(node)
434 439 for r in range(p + 1, self.count()):
435 440 n = self.node(r)
436 441 for pn in self.parents(n):
437 442 if pn == node:
438 443 c.append(n)
439 444 continue
440 445 elif pn == nullid:
441 446 continue
442 447 return c
443 448
444 449 def lookup(self, id):
445 450 """locate a node based on revision number or subset of hex nodeid"""
446 451 try:
447 452 rev = int(id)
448 453 if str(rev) != id: raise ValueError
449 454 if rev < 0: rev = self.count() + rev
450 455 if rev < 0 or rev >= self.count(): raise ValueError
451 456 return self.node(rev)
452 457 except (ValueError, OverflowError):
453 458 c = []
454 459 for n in self.nodemap:
455 460 if hex(n).startswith(id):
456 461 c.append(n)
457 462 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
458 463 if len(c) < 1: raise RevlogError(_("No match found"))
459 464 return c[0]
460 465
461 466 return None
462 467
    # thin wrappers around the mdiff helpers so subclasses and callers
    # go through a single point
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)
470 475
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            # this revision is stored as a full snapshot, so the delta
            # against its predecessor must be computed from scratch
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            # the on-disk data already is the wanted delta
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)
483 488
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # do we have useful data cached?  the cached text can serve as
        # an intermediate result if it sits inside our delta chain
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            # no usable cache: the first chunk is the full text at 'base'
            last = self.length(base)
            text = decompress(data[:last])

        # apply the chain of deltas from base+1 through rev
        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        # the reconstructed text must hash back to the requested node
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
525 530
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta

        Returns the nodeid of the new (or pre-existing) revision.
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        # revision already known: adding is a no-op
        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                # no precomputed delta: diff against the current tip
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        # offset, length, base, linkrev, p1, p2, nodeid
        e = (offset, l, base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        f = self.opener(self.datafile, "a")
        if data[0]:
            f.write(data[0])
        f.write(data[1])
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node
587 592
588 593 def ancestor(self, a, b):
589 594 """calculate the least common ancestor of nodes a and b"""
590 595 # calculate the distance of every node from root
591 596 dist = {nullid: 0}
592 597 for i in xrange(self.count()):
593 598 n = self.node(i)
594 599 p1, p2 = self.parents(n)
595 600 dist[n] = max(dist[p1], dist[p2]) + 1
596 601
597 602 # traverse ancestors in order of decreasing distance from root
598 603 def ancestors(node):
599 604 # we store negative distances because heap returns smallest member
600 605 h = [(-dist[node], node)]
601 606 seen = {}
602 607 earliest = self.count()
603 608 while h:
604 609 d, n = heapq.heappop(h)
605 610 if n not in seen:
606 611 seen[n] = 1
607 612 r = self.rev(n)
608 613 yield (-d, n)
609 614 for p in self.parents(n):
610 615 heapq.heappush(h, (-dist[p], p))
611 616
612 617 def generations(node):
613 618 sg, s = None, {}
614 619 for g,n in ancestors(node):
615 620 if g != sg:
616 621 if sg:
617 622 yield sg, s
618 623 sg, s = g, {n:1}
619 624 else:
620 625 s[n] = 1
621 626 yield sg, s
622 627
623 628 x = generations(a)
624 629 y = generations(b)
625 630 gx = x.next()
626 631 gy = y.next()
627 632
628 633 # increment each ancestor list until it is closer to root than
629 634 # the other, or they match
630 635 while 1:
631 636 #print "ancestor gen %s %s" % (gx[0], gy[0])
632 637 if gx[0] == gy[0]:
633 638 # find the intersection
634 639 i = [ n for n in gx[1] if n in gy[1] ]
635 640 if i:
636 641 return i[0]
637 642 else:
638 643 #print "next"
639 644 gy = y.next()
640 645 gx = x.next()
641 646 elif gx[0] < gy[0]:
642 647 #print "next y"
643 648 gy = y.next()
644 649 else:
645 650 #print "next x"
646 651 gx = x.next()
647 652
    def group(self, nodelist, lookup, infocollect = None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]

        lookup maps a node to the metadata to embed in its chunk;
        infocollect, if not None, is called once with each node
        before its chunk is produced.
        """
        revs = [self.rev(n) for n in nodelist]
        needed = dict.fromkeys(revs, 1)

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []  # NOTE(review): never used below; kept as-is
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            if infocollect is not None:
                infocollect(n)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                    tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            # chunk layout: length, node + p1 + p2 + extra metadata, delta
            p = self.parents(n)
            meta = n + p[0] + p[1] + lookup(n)
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)
757 762
758 763 def addgroup(self, revs, linkmapper, transaction, unique=0):
759 764 """
760 765 add a delta group
761 766
762 767 given a set of deltas, add them to the revision log. the
763 768 first delta is against its parent, which should be in our
764 769 log, the rest are against the previous delta.
765 770 """
766 771
767 772 #track the base of the current delta log
768 773 r = self.count()
769 774 t = r - 1
770 775 node = nullid
771 776
772 777 base = prev = -1
773 778 start = end = measure = 0
774 779 if r:
775 780 start = self.start(self.base(t))
776 781 end = self.end(t)
777 782 measure = self.length(self.base(t))
778 783 base = self.base(t)
779 784 prev = self.tip()
780 785
781 786 transaction.add(self.datafile, end)
782 787 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
783 788 dfh = self.opener(self.datafile, "a")
784 789 ifh = self.opener(self.indexfile, "a")
785 790
786 791 # loop through our set of deltas
787 792 chain = None
788 793 for chunk in revs:
789 794 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
790 795 link = linkmapper(cs)
791 796 if node in self.nodemap:
792 797 # this can happen if two branches make the same change
793 798 # if unique:
794 799 # raise RevlogError(_("already have %s") % hex(node[:4]))
795 800 chain = node
796 801 continue
797 802 delta = chunk[80:]
798 803
799 804 for p in (p1, p2):
800 805 if not p in self.nodemap:
801 806 raise RevlogError(_("unknown parent %s") % short(p1))
802 807
803 808 if not chain:
804 809 # retrieve the parent revision of the delta chain
805 810 chain = p1
806 811 if not chain in self.nodemap:
807 812 raise RevlogError(_("unknown base %s") % short(chain[:4]))
808 813
809 814 # full versions are inserted when the needed deltas become
810 815 # comparable to the uncompressed text or when the previous
811 816 # version is not the one we have a delta against. We use
812 817 # the size of the previous full rev as a proxy for the
813 818 # current size.
814 819
815 820 if chain == prev:
816 821 tempd = compress(delta)
817 822 cdelta = tempd[0] + tempd[1]
818 823
819 824 if chain != prev or (end - start + len(cdelta)) > measure * 2:
820 825 # flush our writes here so we can read it in revision
821 826 dfh.flush()
822 827 ifh.flush()
823 828 text = self.revision(chain)
824 829 text = self.patches(text, [delta])
825 830 chk = self.addrevision(text, transaction, link, p1, p2)
826 831 if chk != node:
827 832 raise RevlogError(_("consistency error adding group"))
828 833 measure = len(text)
829 834 else:
830 835 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
831 836 self.index.append(e)
832 837 self.nodemap[node] = r
833 838 dfh.write(cdelta)
834 839 ifh.write(struct.pack(indexformat, *e))
835 840
836 841 t, r, chain, prev = r, r + 1, node, node
837 842 start = self.start(self.base(t))
838 843 end = self.end(t)
839 844
840 845 dfh.close()
841 846 ifh.close()
842 847 return node
843 848
    def strip(self, rev, minlink):
        """remove revision rev and everything after it from the revlog

        Revisions whose linkrev is older than minlink are preserved:
        rev is advanced past them before truncating.
        """
        if self.count() == 0 or rev >= self.count():
            return

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][3]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        self.opener(self.datafile, "a").truncate(end)
        end = rev * struct.calcsize(indexformat)
        self.opener(self.indexfile, "a").truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        for p in self.index[rev:]:
            del self.nodemap[p[6]]
        del self.index[rev:]

        # truncating the lazyindex also truncates the lazymap.
        # (note: 'end' is the index-file byte offset at this point)
        if isinstance(self.index, lazyindex):
            self.index.trunc(end)
872 877
873 878
874 879 def checksize(self):
875 880 expected = 0
876 881 if self.count():
877 882 expected = self.end(self.count() - 1)
878 883 try:
879 884 f = self.opener(self.datafile)
880 885 f.seek(0, 2)
881 886 actual = f.tell()
882 887 return expected - actual
883 888 except IOError, inst:
884 889 if inst.errno == errno.ENOENT:
885 890 return 0
886 891 raise
887 892
888 893
General Comments 0
You need to be logged in to leave comments. Login now