##// END OF EJS Templates
fix warnings from pychecker (unused variables and shadowing)
Benoit Boissinot -
r1749:d457fec7 default
parent child Browse files
Show More
@@ -1,2853 +1,2853
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from node import *
10 10 from i18n import gettext as _
11 11 demandload(globals(), "os re sys signal shutil imp urllib pdb")
12 12 demandload(globals(), "fancyopts ui hg util lock revlog")
13 13 demandload(globals(), "fnmatch hgweb mdiff random signal time traceback")
14 14 demandload(globals(), "errno socket version struct atexit sets bz2")
15 15
class UnknownCommand(Exception):
    """Raised when a command name cannot be found in the command table."""
class AmbiguousCommand(Exception):
    """Raised when a command shortcut matches more than one command."""
20 20
def filterfiles(filters, files):
    """Return the members of files selected by filters.

    A file is selected when it is an exact match for one of the
    filters, or when it lies below a filter treated as a directory
    prefix.  Exact matches come first, then prefix matches per filter.
    """
    selected = [f for f in files if f in filters]

    for prefix in filters:
        if prefix and prefix[-1] != "/":
            prefix += "/"
        selected.extend([f for f in files if f.startswith(prefix)])
    return selected
29 29
def relpath(repo, args):
    """Resolve args relative to the repository's working directory.

    When the repository reports a current working directory, each path
    in args is joined to it and normalized; otherwise args is returned
    unchanged.
    """
    base = repo.getcwd()
    if not base:
        return args
    return [util.normpath(os.path.join(base, p)) for p in args]
35 35
def matchpats(repo, pats=None, opts=None, head=''):
    """Build a (files, matchfn, anypats) matcher via util.cmdmatcher.

    When no patterns are given but the repo has a working directory,
    the include/exclude options are rebased onto that directory and
    matching proceeds from the repository root.

    Fix: the original defaults were the shared mutables [] and {};
    opts is mutated below, so a shared default would leak state
    between calls.  None sentinels preserve the old call signature.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    cwd = repo.getcwd()
    if not pats and cwd:
        # Rebase the caller's include/exclude patterns onto cwd, then
        # match from the root ('' cwd) so they are interpreted there.
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
        cwd = ''
    return util.cmdmatcher(repo.root, cwd, pats or ['.'], opts.get('include'),
                           opts.get('exclude'), head)
44 44
def makewalk(repo, pats, opts, node=None, head=''):
    """Return (files, matchfn, iterator) for walking matched files.

    The iterator yields (src, abspath, relpath, exact) tuples, where
    exact is true when abspath was one of the explicitly matched files.
    """
    files, matchfn, anypats = matchpats(repo, pats, opts, head)
    # Membership map of the explicitly matched names.
    exact = dict(zip(files, files))

    def iterate():
        for src, fn in repo.walk(node=node, files=files, match=matchfn):
            yield src, fn, util.pathto(repo.getcwd(), fn), fn in exact

    return files, matchfn, iterate()
52 52
def walk(repo, pats, opts, node=None, head=''):
    """Yield (src, abspath, relpath, exact) for each file matched by pats."""
    dummy, dummy2, results = makewalk(repo, pats, opts, node, head)
    for item in results:
        yield item
57 57
def walkchangerevs(ui, repo, pats, opts):
    '''Iterate over files and the revs they changed in.

    Callers most commonly need to iterate backwards over the history
    it is interested in. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an (iterator, getchange, matchfn) tuple. The
    getchange function returns the changelog entry for a numeric
    revision. The iterator yields 3-tuples. They will be of one of
    the following forms:

    "window", incrementing, lastrev: stepping through a window,
    positive if walking forwards through revs, last rev in the
    sequence iterated over - use to reset state for the current window

    "add", rev, fns: out-of-order traversal of the given file names
    fns, which changed during revision rev - use to gather data for
    possible display

    "iter", rev, None: in-order traversal of the revs earlier iterated
    over with "add" - use to display data'''

    files, matchfn, anypats = matchpats(repo, pats, opts)

    if repo.changelog.count() == 0:
        return [], False, matchfn

    revs = map(int, revrange(ui, repo, opts['rev'] or ['tip:0']))
    wanted = {}
    slowpath = anypats
    window = 300
    fncache = {}

    chcache = {}
    def getchange(rev):
        # Changelog reads cached per revision number.
        ch = chcache.get(rev)
        if ch is None:
            chcache[rev] = ch = repo.changelog.read(repo.lookup(str(rev)))
        return ch

    if not slowpath and not files:
        # No files, no patterns.  Display all revs.
        wanted = dict(zip(revs, revs))
    if not slowpath:
        # Only files, no patterns.  Check the history of each file.
        def filerevgen(filelog):
            # Yield the changelog revisions touching filelog, newest
            # first, scanning the filelog in windowed batches.  The
            # batch is named flrevs so it does not shadow 'revs' from
            # the enclosing scope.
            for i in xrange(filelog.count() - 1, -1, -window):
                flrevs = []
                for j in xrange(max(0, i - window), i + 1):
                    flrevs.append(filelog.linkrev(filelog.node(j)))
                flrevs.reverse()
                for rev in flrevs:
                    yield rev

        minrev, maxrev = min(revs), max(revs)
        # 'file_' rather than 'file': do not shadow the builtin.
        for file_ in files:
            filelog = repo.file(file_)
            # A zero count may be a directory or deleted file, so
            # try to find matching entries on the slow path.
            if filelog.count() == 0:
                slowpath = True
                break
            for rev in filerevgen(filelog):
                if rev <= maxrev:
                    if rev < minrev:
                        break
                    fncache.setdefault(rev, [])
                    fncache[rev].append(file_)
                    wanted[rev] = 1
    if slowpath:
        # The slow path checks files modified in every changeset.
        def changerevgen():
            for i in xrange(repo.changelog.count() - 1, -1, -window):
                for j in xrange(max(0, i - window), i + 1):
                    yield j, getchange(j)[3]

        for rev, changefiles in changerevgen():
            matches = filter(matchfn, changefiles)
            if matches:
                fncache[rev] = matches
                wanted[rev] = 1

    def iterate():
        # Walk the requested revs in windowed batches, emitting the
        # "window"/"add"/"iter" protocol described in the docstring.
        for i in xrange(0, len(revs), window):
            yield 'window', revs[0] < revs[-1], revs[-1]
            nrevs = [rev for rev in revs[i:min(i+window, len(revs))]
                     if rev in wanted]
            srevs = list(nrevs)
            srevs.sort()
            for rev in srevs:
                fns = fncache.get(rev) or filter(matchfn, getchange(rev)[3])
                yield 'add', rev, fns
            for rev in nrevs:
                yield 'iter', rev, None
    return iterate(), getchange, matchfn
158 158
# Separator character for 'start:end' revision ranges.
revrangesep = ':'

def revrange(ui, repo, revs, revlog=None):
    """Yield revisions as strings from a list of revision specifications.

    Each spec is either a single revision (plain integer, negative
    offset from the end, or a symbolic name looked up in the changelog
    or the given revlog) or a 'start:end' range.  Duplicate revisions
    are suppressed across all specs.
    """
    if revlog is None:
        revlog = repo.changelog
    revcount = revlog.count()

    def tonum(val, defval):
        # Resolve one spec component to a revision number, falling
        # back to symbolic lookup when it is not an in-range integer.
        if not val:
            return defval
        try:
            num = int(val)
            if str(num) != val:
                raise ValueError
            if num < 0:
                num += revcount
            if num < 0:
                num = 0
            elif num >= revcount:
                raise ValueError
        except ValueError:
            try:
                num = repo.changelog.rev(repo.lookup(val))
            except KeyError:
                try:
                    num = revlog.rev(revlog.lookup(val))
                except KeyError:
                    raise util.Abort(_('invalid revision identifier %s'), val)
        return num

    seen = {}
    for spec in revs:
        if revrangesep in spec:
            start, end = spec.split(revrangesep, 1)
            start = tonum(start, 0)
            end = tonum(end, revcount - 1)
            if start > end:
                step = -1
            else:
                step = 1
            for rev in xrange(start, end + step, step):
                if rev not in seen:
                    seen[rev] = 1
                    yield str(rev)
        else:
            rev = tonum(spec, None)
            if rev not in seen:
                seen[rev] = 1
                yield str(rev)
206 206
def make_filename(repo, r, pat, node=None,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output file name pattern.

    Supported escapes (availability depends on the arguments given):
    %% literal percent, %b basename of the repo root, %H/%h long/short
    hash of node, %R revision of node in revlog r, %r zero-padded
    revision, %N total patch count, %n (zero-padded) sequence number,
    %s/%d/%p basename/dirname/full path of pathname.

    Raises util.Abort for an escape that is unknown or not available
    with the arguments supplied.
    """
    # Escapes that only make sense when a node was given.
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(r.rev(node)),
        'h': lambda: short(node),
        }
    # Escapes that are always available.
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node and revwidth is not None:
            expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # With both available, pad %n to the width of the total.
            expander['n'] = lambda:str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # Consume the '%' and expand the escape character.
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        # Unknown/unavailable escape: the offending key is in inst.args.
        raise util.Abort(_("invalid format spec '%%%s' in output file name"),
                         inst.args[0])
250 250
def make_file(repo, r, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Open an output (or input) file for the given name pattern.

    An empty pattern or '-' maps to stdout/stdin depending on mode; an
    object already supporting the needed write/read method is returned
    unchanged; anything else is expanded with make_filename and opened
    with the requested mode.
    """
    writing = 'w' in mode
    if not pat or pat == '-':
        if writing:
            return sys.stdout
        return sys.stdin
    if writing and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    name = make_filename(repo, r, pat, node, total, seqno, revwidth,
                         pathname)
    return open(name, mode)
262 262
def dodiff(fp, ui, repo, node1, node2, files=None, match=util.always,
           changes=None, text=False, opts={}):
    """Write a unified diff between two repository states to fp.

    node1/node2 select the states to compare; a false node2 means the
    working directory, and a false node1 defaults to the first dirstate
    parent.  files optionally restricts the diff, changes may carry a
    precomputed repo.changes() result, and text forces diffing of files
    that look binary.  opts may override configured diff settings.
    NOTE(review): the default opts={} is a shared mutable, but it is
    only read here (opts.get), so it is harmless as written.
    """
    if not changes:
        changes = repo.changes(node1, node2, files, match=match)
    modified, added, removed, deleted, unknown = changes
    # An explicit file list further narrows what repo.changes reported.
    if files:
        modified, added, removed = map(lambda x: filterfiles(files, x),
                                       (modified, added, removed))

    if not modified and not added and not removed:
        return

    if node2:
        # The "new" side of the diff comes from the node2 manifest.
        change = repo.changelog.read(node2)
        mmap2 = repo.manifest.read(change[0])
        date2 = util.datestr(change[2])
        def read(f):
            return repo.file(f).read(mmap2[f])
    else:
        # Diff against the working directory.
        date2 = util.datestr()
        if not node1:
            node1 = repo.dirstate.parents()[0]
        def read(f):
            return repo.wread(f)

    # r holds the revision ids for the diff header: suppressed when
    # quiet, long hashes when verbose, short hashes otherwise.
    if ui.quiet:
        r = None
    else:
        hexfunc = ui.verbose and hex or short
        r = [hexfunc(node) for node in [node1, node2] if node]

    # The "old" side of the diff always comes from the node1 manifest.
    change = repo.changelog.read(node1)
    mmap = repo.manifest.read(change[0])
    date1 = util.datestr(change[2])

    # Command-line options win over configured diff options.
    diffopts = ui.diffopts()
    showfunc = opts.get('show_function') or diffopts['showfunc']
    ignorews = opts.get('ignore_all_space') or diffopts['ignorews']
    for f in modified:
        to = None
        if f in mmap:
            to = repo.file(f).read(mmap[f])
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
                               showfunc=showfunc, ignorews=ignorews))
    for f in added:
        # Added: no old content.
        to = None
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
                               showfunc=showfunc, ignorews=ignorews))
    for f in removed:
        # Removed: no new content.
        to = repo.file(f).read(mmap[f])
        tn = None
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
                               showfunc=showfunc, ignorews=ignorews))
318 318
def trimuser(ui, name, rev, revcache):
    """Return the shortened committer name for rev, caching in revcache."""
    cached = revcache.get(rev)
    if cached is not None:
        return cached
    cached = revcache[rev] = ui.shortuser(name)
    return cached
325 325
def show_changeset(ui, repo, rev=0, changenode=None, brinfo=None):
    """show a single changeset or file revision"""
    log = repo.changelog
    # Accept either a revision number or a changelog node; derive the
    # one that was not supplied.
    if changenode is None:
        changenode = log.node(rev)
    elif not rev:
        rev = log.rev(changenode)

    if ui.quiet:
        # Quiet mode: just "rev:shorthash".
        ui.write("%d:%s\n" % (rev, short(changenode)))
        return

    changes = log.read(changenode)
    date = util.datestr(changes[2])

    # Parents use long hashes when verbose; the null parent is shown
    # only in debug mode.
    parents = [(log.rev(p), ui.verbose and hex(p) or short(p))
               for p in log.parents(changenode)
               if ui.debugflag or p != nullid]
    if not ui.debugflag and len(parents) == 1 and parents[0][0] == rev-1:
        # Suppress a sole parent that is trivially the previous rev.
        parents = []

    if ui.verbose:
        ui.write(_("changeset: %d:%s\n") % (rev, hex(changenode)))
    else:
        ui.write(_("changeset: %d:%s\n") % (rev, short(changenode)))

    for tag in repo.nodetags(changenode):
        ui.status(_("tag: %s\n") % tag)
    for parent in parents:
        ui.write(_("parent: %d:%s\n") % parent)

    if brinfo and changenode in brinfo:
        br = brinfo[changenode]
        ui.write(_("branch: %s\n") % " ".join(br))

    ui.debug(_("manifest: %d:%s\n") % (repo.manifest.rev(changes[0]),
                                       hex(changes[0])))
    ui.status(_("user: %s\n") % changes[1])
    ui.status(_("date: %s\n") % date)

    if ui.debugflag:
        # Debug mode: split the file list into modified/added/removed.
        files = repo.changes(log.parents(changenode)[0], changenode)
        for key, value in zip([_("files:"), _("files+:"), _("files-:")], files):
            if value:
                ui.note("%-12s %s\n" % (key, " ".join(value)))
    else:
        ui.note(_("files: %s\n") % " ".join(changes[3]))

    description = changes[4].strip()
    if description:
        if ui.verbose:
            ui.status(_("description:\n"))
            ui.status(description)
            ui.status("\n\n")
        else:
            # Non-verbose: first line of the description only.
            ui.status(_("summary: %s\n") % description.splitlines()[0])
    ui.status("\n")
383 383
def show_version(ui):
    """output version and copyright information"""
    banner = _("Mercurial Distributed SCM (version %s)\n")
    ui.write(banner % version.get_version())
    notice = _(
        "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    ui.status(notice)
394 394
def help_(ui, cmd=None, with_version=False):
    """show help for a given command or all commands"""
    # Fix: dropped the stray unused local 'd = ""' from the command
    # listing loop below.
    option_lists = []
    if cmd and cmd != 'shortlist':
        # Help for a single command.
        if with_version:
            show_version(ui)
            ui.write('\n')
        aliases, i = find(cmd)
        # synopsis
        ui.write("%s\n\n" % i[2])

        # description
        doc = i[0].__doc__
        if not doc:
            doc = _("(No help text available)")
        if ui.quiet:
            doc = doc.splitlines(0)[0]
        ui.write("%s\n" % doc.rstrip())

        if not ui.quiet:
            # aliases
            if len(aliases) > 1:
                ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

            # options
            if i[1]:
                option_lists.append(("options", i[1]))

    else:
        # program name
        if ui.verbose or with_version:
            show_version(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if cmd == "shortlist":
            ui.status(_('basic commands (use "hg help" '
                        'for the full list or option "-v" for details):\n\n'))
        elif ui.verbose:
            ui.status(_('list of commands:\n\n'))
        else:
            ui.status(_('list of commands (use "hg help -v" '
                        'to show aliases and global options):\n\n'))

        # Collect first-docstring-line summaries, keyed by the primary
        # command name (the part before '|', minus any '^' marker).
        h = {}
        cmds = {}
        for c, e in table.items():
            f = c.split("|")[0]
            if cmd == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = e[0].__doc__
            if not doc:
                doc = _("(No help text available)")
            h[f] = doc.splitlines(0)[0].rstrip()
            cmds[f] = c.lstrip("^")

        fns = h.keys()
        fns.sort()
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

    # global options
    if ui.verbose:
        option_lists.append(("global options", globalopts))

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s:\n" % title, None))
        for shortopt, longopt, default, desc in options:
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                              "%s%s" % (desc,
                                        default
                                        and _(" (default: %s)") % default
                                        or "")))

    if opt_output:
        # Align descriptions on the widest option column.
        opts_len = max([len(line[0]) for line in opt_output if line[1]])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
490 489
491 490 # Commands start here, listed alphabetically
492 491
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the repository.

    The files will be added to the repository at the next commit.

    If no names are given, add all files in the repository.
    """
    # NOTE: the docstring above doubles as user-visible help text and
    # is kept verbatim.
    names = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        wanted = False
        if exact:
            # Explicitly named files are always scheduled; only chat
            # about them when verbose.
            wanted = True
            if ui.verbose:
                ui.status(_('adding %s\n') % rel)
        elif repo.dirstate.state(abs) == '?':
            # Pattern matches: only add files not yet tracked.
            wanted = True
            ui.status(_('adding %s\n') % rel)
        if wanted:
            names.append(abs)
    repo.add(names)
513 512
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the repository.

    New files are ignored if they match any of the patterns in .hgignore. As
    with add, these changes take effect at the next commit.
    """
    # Delegate to the lock-aware worker without a pre-held wlock.
    return addremove_lock(ui, repo, pats, opts)
523 522
def addremove_lock(ui, repo, pats, opts, wlock=None):
    # Worker for addremove: schedule unknown files for addition and
    # vanished files for removal, optionally under a pre-held wlock.
    # (Locals renamed so they do not shadow the add/remove commands.)
    to_add = []
    to_remove = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        chatty = ui.verbose or not exact
        if src == 'f' and repo.dirstate.state(abs) == '?':
            to_add.append(abs)
            if chatty:
                ui.status(_('adding %s\n') % ((pats and rel) or abs))
        if repo.dirstate.state(abs) != 'r' and not os.path.exists(rel):
            to_remove.append(abs)
            if chatty:
                ui.status(_('removing %s\n') % ((pats and rel) or abs))
    repo.add(to_add, wlock=wlock)
    repo.remove(to_remove, wlock=wlock)
537 536
def annotate(ui, repo, *pats, **opts):
    """show changeset information per file line

    List changes in files, showing the revision id responsible for each line

    This command is useful to discover who did a change or when a change took
    place.

    Without the -a option, annotate will avoid processing files it
    detects as binary. With -a, annotate will generate an annotation
    anyway, probably with undesirable results.
    """
    def getnode(rev):
        # Short hash for a changelog revision number.
        return short(repo.changelog.node(rev))

    ucache = {}
    def getname(rev):
        # Trimmed committer name, cached per revision in ucache.
        cl = repo.changelog.read(repo.changelog.node(rev))
        return trimuser(ui, cl[1], rev, ucache)

    dcache = {}
    def getdate(rev):
        # Commit date string, cached per revision in dcache.
        datestr = dcache.get(rev)
        if datestr is None:
            cl = repo.changelog.read(repo.changelog.node(rev))
            datestr = dcache[rev] = util.datestr(cl[2])
        return datestr

    if not pats:
        raise util.Abort(_('at least one file name or pattern required'))

    # Column producers, selected by the corresponding option flags.
    opmap = [['user', getname], ['number', str], ['changeset', getnode],
             ['date', getdate]]
    if not opts['user'] and not opts['changeset'] and not opts['date']:
        # Nothing requested: default to showing revision numbers.
        opts['number'] = 1

    if opts['rev']:
        node = repo.changelog.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]
    change = repo.changelog.read(node)
    mmap = repo.manifest.read(change[0])

    for src, abs, rel, exact in walk(repo, pats, opts):
        if abs not in mmap:
            ui.warn(_("warning: %s is not in the repository!\n") %
                    ((pats and rel) or abs))
            continue

        f = repo.file(abs)
        if not opts['text'] and util.binary(f.read(mmap[abs])):
            ui.write(_("%s: binary file\n") % ((pats and rel) or abs))
            continue

        lines = f.annotate(mmap[abs])
        pieces = []

        # Build one right-aligned column per selected option.  (Note
        # that 'f' is deliberately reused here as the column function.)
        for o, f in opmap:
            if opts[o]:
                l = [f(n) for n, dummy in lines]
                if l:
                    m = max(map(len, l))
                    pieces.append(["%*s" % (m, x) for x in l])

        if pieces:
            # Zip the columns back together, one output row per line.
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
605 604
def bundle(ui, repo, fname, dest="default-push", **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting all changesets
    not found in the other repository.

    This file can then be transferred using conventional means and
    applied to another repository with the unbundle command. This is
    useful when native push and pull are not available or when
    exporting an entire repository is undesirable. The standard file
    extension is ".hg".

    Unlike import/export, this exactly preserves all changeset
    contents including permissions, rename data, and revision history.
    """
    f = open(fname, "wb")
    dest = ui.expandpath(dest, repo.root)
    other = hg.repository(ui, dest)
    # Changesets we have that the destination lacks.
    o = repo.findoutgoing(other)
    cg = repo.changegroup(o, 'bundle')

    try:
        # Bundle format: "HG10" magic followed by the bz2-compressed
        # changegroup stream.
        f.write("HG10")
        z = bz2.BZ2Compressor(9)
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            f.write(z.compress(chunk))
        f.write(z.flush())
    except:
        # Leave no partial bundle behind, then re-raise.
        # NOTE(review): f is never explicitly closed on either path;
        # it appears to rely on garbage collection — confirm intended.
        os.unlink(fname)
        raise
639 638
def cat(ui, repo, file1, *pats, **opts):
    """output the latest or given revisions of files

    Print the specified files as they were at the given revision.
    If no revision is given then the tip is used.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repo root
    %p root-relative path name of file being printed
    """
    # Resolve the requested revision to its manifest.  (Fix: removed
    # the dead 'mf = {}' initializer; mf is always assigned below.)
    rev = opts['rev']
    if rev:
        node = repo.lookup(rev)
    else:
        node = repo.changelog.tip()
    change = repo.changelog.read(node)
    mf = repo.manifest.read(change[0])
    # Emit each matched file's contents as of that revision.
    for src, abs, rel, exact in walk(repo, (file1,) + pats, opts, node):
        r = repo.file(abs)
        n = mf[abs]
        fp = make_file(repo, r, opts['output'], node=n, pathname=abs)
        fp.write(r.read(n))
667 666
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem. Some filesystems,
    such as AFS, implement hardlinking incorrectly, but do not report
    errors. In these cases, use the --pull option to avoid
    hardlinking.
    """
    if dest is None:
        dest = os.path.basename(os.path.normpath(source))

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists"), dest)

    dest = os.path.realpath(dest)

    class Dircleanup(object):
        # Removes the partially-created destination directory if the
        # clone fails before close() is called.
        def __init__(self, dir_):
            # Bind rmtree early so it is still reachable if __del__
            # runs during interpreter teardown.
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
            os.mkdir(dir_)
        def close(self):
            # Clone succeeded: keep the directory.
            self.dir_ = None
        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    if not os.path.exists(source):
        # Not a local path: expand configured path aliases.
        source = ui.expandpath(source)

    d = Dircleanup(dest)
    abspath = source
    other = hg.repository(ui, source)

    copy = False
    if other.dev() != -1:
        # Local source on a real device: candidate for direct copy.
        abspath = os.path.abspath(source)
        if not opts['pull'] and not opts['rev']:
            copy = True

    if copy:
        try:
            # we use a lock here because if we race with commit, we
            # can end up with extra data in the cloned revlogs that's
            # not pointed to by changesets, thus causing verify to
            # fail
            l1 = lock.lock(os.path.join(source, ".hg", "lock"))
        except OSError:
            # Cannot lock the source; fall back to a pull-based clone.
            copy = False

    if copy:
        # we lock here to avoid premature writing to the target
        os.mkdir(os.path.join(dest, ".hg"))
        l2 = lock.lock(os.path.join(dest, ".hg", "lock"))

        # NOTE: l1/l2 look unused but keep both locks held until this
        # function returns.
        files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
        for f in files.split():
            src = os.path.join(source, ".hg", f)
            dst = os.path.join(dest, ".hg", f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                # Missing pieces (e.g. an absent .d file) are fine;
                # anything else is a real error.
                if inst.errno != errno.ENOENT:
                    raise

        repo = hg.repository(ui, dest)

    else:
        # Pull-based clone (remote source, --pull, or -r given).
        revs = None
        if opts['rev']:
            if not other.local():
                error = _("clone -r not supported yet for remote repositories.")
                raise util.Abort(error)
            else:
                revs = [other.lookup(rev) for rev in opts['rev']]
        repo = hg.repository(ui, dest, create=1)
        repo.pull(other, heads = revs)

    # Record the source as the default path for future pulls.
    f = repo.opener("hgrc", "w", text=True)
    f.write("[paths]\n")
    f.write("default = %s\n" % abspath)
    f.close()

    if not opts['noupdate']:
        update(ui, repo)

    d.close()
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    will be commited.

    The HGEDITOR or EDITOR environment variables are used to start an
    editor to add a commit comment.
    """
    message = opts['message']
    logfile = opts['logfile']

    # -m and -l are two mutually exclusive ways to supply the message.
    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                message = sys.stdin.read()
            else:
                message = open(logfile).read()
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))

    if opts['addremove']:
        addremove(ui, repo, *pats, **opts)
    fns, match, anypats = matchpats(repo, pats, opts)
    if pats:
        # Explicit patterns: commit only the matched changed files.
        modified, added, removed, deleted, unknown = (
            repo.changes(files=fns, match=match))
        files = modified + added + removed
    else:
        # No patterns: let repo.commit pick up everything outstanding.
        files = []
    try:
        repo.commit(files, message, opts['user'], opts['date'], match)
    except ValueError, inst:
        raise util.Abort(str(inst))
810 809
811 810 def docopy(ui, repo, pats, opts):
812 811 cwd = repo.getcwd()
813 812 errors = 0
814 813 copied = []
815 814 targets = {}
816 815
817 816 def okaytocopy(abs, rel, exact):
818 817 reasons = {'?': _('is not managed'),
819 818 'a': _('has been marked for add'),
820 819 'r': _('has been marked for remove')}
821 820 state = repo.dirstate.state(abs)
822 821 reason = reasons.get(state)
823 822 if reason:
824 823 if state == 'a':
825 824 origsrc = repo.dirstate.copied(abs)
826 825 if origsrc is not None:
827 826 return origsrc
828 827 if exact:
829 828 ui.warn(_('%s: not copying - file %s\n') % (rel, reason))
830 829 else:
831 830 return abs
832 831
833 832 def copy(origsrc, abssrc, relsrc, target, exact):
834 833 abstarget = util.canonpath(repo.root, cwd, target)
835 834 reltarget = util.pathto(cwd, abstarget)
836 835 prevsrc = targets.get(abstarget)
837 836 if prevsrc is not None:
838 837 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
839 838 (reltarget, abssrc, prevsrc))
840 839 return
841 840 if (not opts['after'] and os.path.exists(reltarget) or
842 841 opts['after'] and repo.dirstate.state(abstarget) not in '?r'):
843 842 if not opts['force']:
844 843 ui.warn(_('%s: not overwriting - file exists\n') %
845 844 reltarget)
846 845 return
847 846 if not opts['after']:
848 847 os.unlink(reltarget)
849 848 if opts['after']:
850 849 if not os.path.exists(reltarget):
851 850 return
852 851 else:
853 852 targetdir = os.path.dirname(reltarget) or '.'
854 853 if not os.path.isdir(targetdir):
855 854 os.makedirs(targetdir)
856 855 try:
857 856 shutil.copyfile(relsrc, reltarget)
858 857 shutil.copymode(relsrc, reltarget)
859 858 except shutil.Error, inst:
860 859 raise util.Abort(str(inst))
861 860 except IOError, inst:
862 861 if inst.errno == errno.ENOENT:
863 862 ui.warn(_('%s: deleted in working copy\n') % relsrc)
864 863 else:
865 864 ui.warn(_('%s: cannot copy - %s\n') %
866 865 (relsrc, inst.strerror))
867 866 errors += 1
868 867 return
869 868 if ui.verbose or not exact:
870 869 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
871 870 targets[abstarget] = abssrc
872 871 repo.copy(origsrc, abstarget)
873 872 copied.append((abssrc, relsrc, exact))
874 873
875 874 def targetpathfn(pat, dest, srcs):
876 875 if os.path.isdir(pat):
877 876 abspfx = util.canonpath(repo.root, cwd, pat)
878 877 if destdirexists:
879 878 striplen = len(os.path.split(abspfx)[0])
880 879 else:
881 880 striplen = len(abspfx)
882 881 if striplen:
883 882 striplen += len(os.sep)
884 883 res = lambda p: os.path.join(dest, p[striplen:])
885 884 elif destdirexists:
886 885 res = lambda p: os.path.join(dest, os.path.basename(p))
887 886 else:
888 887 res = lambda p: dest
889 888 return res
890 889
891 890 def targetpathafterfn(pat, dest, srcs):
892 891 if util.patkind(pat, None)[0]:
893 892 # a mercurial pattern
894 893 res = lambda p: os.path.join(dest, os.path.basename(p))
895 894 else:
896 895 abspfx = util.canonpath(repo.root, cwd, pat)
897 896 if len(abspfx) < len(srcs[0][0]):
898 897 # A directory. Either the target path contains the last
899 898 # component of the source path or it does not.
900 899 def evalpath(striplen):
901 900 score = 0
902 901 for s in srcs:
903 902 t = os.path.join(dest, s[0][striplen:])
904 903 if os.path.exists(t):
905 904 score += 1
906 905 return score
907 906
908 907 striplen = len(abspfx)
909 908 if striplen:
910 909 striplen += len(os.sep)
911 910 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
912 911 score = evalpath(striplen)
913 912 striplen1 = len(os.path.split(abspfx)[0])
914 913 if striplen1:
915 914 striplen1 += len(os.sep)
916 915 if evalpath(striplen1) > score:
917 916 striplen = striplen1
918 917 res = lambda p: os.path.join(dest, p[striplen:])
919 918 else:
920 919 # a file
921 920 if destdirexists:
922 921 res = lambda p: os.path.join(dest, os.path.basename(p))
923 922 else:
924 923 res = lambda p: dest
925 924 return res
926 925
927 926
928 927 pats = list(pats)
929 928 if not pats:
930 929 raise util.Abort(_('no source or destination specified'))
931 930 if len(pats) == 1:
932 931 raise util.Abort(_('no destination specified'))
933 932 dest = pats.pop()
934 933 destdirexists = os.path.isdir(dest)
935 934 if (len(pats) > 1 or util.patkind(pats[0], None)[0]) and not destdirexists:
936 935 raise util.Abort(_('with multiple sources, destination must be an '
937 936 'existing directory'))
938 937 if opts['after']:
939 938 tfn = targetpathafterfn
940 939 else:
941 940 tfn = targetpathfn
942 941 copylist = []
943 942 for pat in pats:
944 943 srcs = []
945 944 for tag, abssrc, relsrc, exact in walk(repo, [pat], opts):
946 945 origsrc = okaytocopy(abssrc, relsrc, exact)
947 946 if origsrc:
948 947 srcs.append((origsrc, abssrc, relsrc, exact))
949 948 if not srcs:
950 949 continue
951 950 copylist.append((tfn(pat, dest, srcs), srcs))
952 951 if not copylist:
953 952 raise util.Abort(_('no files to copy'))
954 953
955 954 for targetpath, srcs in copylist:
956 955 for origsrc, abssrc, relsrc, exact in srcs:
957 956 copy(origsrc, abssrc, relsrc, targetpath(abssrc), exact)
958 957
959 958 if errors:
960 959 ui.warn(_('(consider using --after)\n'))
961 960 return errors, copied
962 961
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files.  If dest is a
    directory, copies are put in that directory.  If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory.  If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record copied files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    # docopy does the real work and returns (error count, copied list);
    # only the error count matters as this command's exit status
    errs, copied = docopy(ui, repo, pats, opts)
    return errs
982 981
def debugancestor(ui, index, rev1, rev2):
    """find the ancestor revision of two revisions in a given index"""
    # open the revlog straight from its index file; no repo needed
    rl = revlog.revlog(util.opener(os.getcwd()), index, "")
    anc = rl.ancestor(rl.lookup(rev1), rl.lookup(rev2))
    ui.write("%d:%s\n" % (rl.rev(anc), hex(anc)))
988 987
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-check every dirstate entry against the manifests of the
    working directory's two parents and warn about inconsistencies;
    abort if any were found.  (The unused ``keys`` list of the
    original was dead code and has been removed.)
    """
    parent1, parent2 = repo.dirstate.parents()
    repo.dirstate.read()
    dc = repo.dirstate.map
    m1n = repo.changelog.read(parent1)[0]
    m2n = repo.changelog.read(parent2)[0]
    m1 = repo.manifest.read(m1n)
    m2 = repo.manifest.read(m2n)
    errors = 0
    for f in dc:
        state = repo.dirstate.state(f)
        # normal/removed entries must exist in the first parent's manifest
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # added entries must not already be tracked
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # merged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        # every file in the first manifest must be known to the dirstate
        state = repo.dirstate.state(f)
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
1021 1020
def debugconfig(ui):
    """show combined config settings from all hgrc files"""
    try:
        # instantiating the repository is done purely for its side
        # effect of loading .hg/hgrc into ui's configuration; binding
        # the result to an unused local was a pychecker warning
        hg.repository(ui)
    except hg.RepoError:
        pass
    for section, name, value in ui.walkconfig():
        ui.write('%s.%s=%s\n' % (section, name, value))
1030 1029
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    # a missing second parent defaults to the null revision
    rev2 = rev2 or hex(nullid)
    repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
1042 1041
1043 1042 def debugstate(ui, repo):
1044 1043 """show the contents of the current dirstate"""
1045 1044 repo.dirstate.read()
1046 1045 dc = repo.dirstate.map
1047 1046 keys = dc.keys()
1048 1047 keys.sort()
1049 1048 for file_ in keys:
1050 1049 ui.write("%c %3o %10d %s %s\n"
1051 1050 % (dc[file_][0], dc[file_][1] & 0777, dc[file_][2],
1052 1051 time.strftime("%x %X",
1053 1052 time.localtime(dc[file_][3])), file_))
1054 1053 for f in repo.dirstate.copies:
1055 1054 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copies[f], f))
1056 1055
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # the data file ends in ".d"; derive the index file name from it
    rl = revlog.revlog(util.opener(os.getcwd()), file_[:-2] + ".i", file_)
    try:
        ui.write(rl.revision(rl.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s'), rev)
1064 1063
def debugindex(ui, file_):
    """dump the contents of an index file"""
    rl = revlog.revlog(util.opener(os.getcwd()), file_, "")
    ui.write(" rev offset length base linkrev"
             " nodeid p1 p2\n")
    for i in range(rl.count()):
        # index entry fields: offset, length, base, linkrev, p1, p2, node
        entry = rl.index[i]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
            i, entry[0], entry[1], entry[2], entry[3],
            short(entry[6]), short(entry[4]), short(entry[5])))
1075 1074
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    rl = revlog.revlog(util.opener(os.getcwd()), file_, "")
    ui.write("digraph G {\n")
    for i in range(rl.count()):
        entry = rl.index[i]
        # edge from the first parent
        ui.write("\t%d -> %d\n" % (rl.rev(entry[4]), i))
        # edge from the second parent, when there is one
        if entry[5] != nullid:
            ui.write("\t%d -> %d\n" % (rl.rev(entry[5]), i))
    ui.write("}\n")
1086 1085
def debugrename(ui, repo, file_, rev=None):
    """dump rename information

    Print the copy/rename source recorded for file_, either at its
    tip revision or at the filelog revision matching rev.
    """
    # parameter renamed from 'file' to 'file_': 'file' shadows the
    # builtin (pychecker) and this matches the debugdata/debugindex
    # convention; dispatch passes arguments positionally
    r = repo.file(relpath(repo, [file_])[0])
    if rev:
        try:
            # assume all revision numbers are for changesets
            n = repo.lookup(rev)
            change = repo.changelog.read(n)
            m = repo.manifest.read(change[0])
            n = m[relpath(repo, [file_])[0]]
        except (hg.RepoError, KeyError):
            # not a changeset id: fall back to a filelog revision
            n = r.lookup(rev)
    else:
        n = r.tip()
    m = r.renamed(n)
    if m:
        ui.write(_("renamed from %s:%s\n") % (m[0], hex(m[1])))
    else:
        ui.write(_("not renamed\n"))
1106 1105
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    items = list(walk(repo, pats, opts))
    if not items:
        return
    # column widths adapt to the longest absolute and relative names;
    # 'abspath' instead of 'abs' avoids shadowing the builtin (pychecker)
    fmt = '%%s %%-%ds %%-%ds %%s' % (
        max([len(abspath) for (src, abspath, rel, exact) in items]),
        max([len(rel) for (src, abspath, rel, exact) in items]))
    for src, abspath, rel, exact in items:
        line = fmt % (src, abspath, rel, exact and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
1118 1117
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.
    """
    revs = [repo.lookup(x) for x in opts['rev']]
    if len(revs) > 2:
        raise util.Abort(_("too many revisions to diff"))
    # zero revs: working dir vs parent; one: rev vs working dir; two: rev vs rev
    node1 = node2 = None
    if revs:
        node1 = revs[0]
    if len(revs) > 1:
        node2 = revs[1]

    fns, matchfn, anypats = matchpats(repo, pats, opts)

    dodiff(sys.stdout, ui, repo, node1, node2, fns, match=matchfn,
           text=opts['text'], opts=opts)
1150 1149
def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
    # write one changeset as a patch with an "hg export" style header
    node = repo.lookup(changeset)
    parents = [p for p in repo.changelog.parents(node) if p != nullid]
    if opts['switch_parent']:
        # diff against the second parent instead of the first
        parents.reverse()
    prev = (parents and parents[0]) or nullid
    change = repo.changelog.read(node)

    fp = make_file(repo, repo.changelog, opts['output'],
                   node=node, total=total, seqno=seqno,
                   revwidth=revwidth)
    if fp != sys.stdout:
        ui.note("%s\n" % fp.name)

    # header lines, then the commit message, then the diff body
    header = ["# HG changeset patch\n",
              "# User %s\n" % change[1],
              "# Node ID %s\n" % hex(node),
              "# Parent %s\n" % hex(prev)]
    if len(parents) > 1:
        header.append("# Parent %s\n" % hex(parents[1]))
    for line in header:
        fp.write(line)
    fp.write(change[4].rstrip())
    fp.write("\n\n")

    dodiff(fp, ui, repo, prev, node, text=opts['text'])
    if fp != sys.stdout:
        fp.close()
1177 1176
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent and commit comment.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.

    With the --switch-parent option, the diff will be against the second
    parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = list(revrange(ui, repo, changesets))
    total = len(revs)
    # column width for zero-padded revision numbers in file names
    revwidth = max(map(len, revs))
    if total > 1:
        ui.note(_("Exporting patches:\n"))
    else:
        ui.note(_("Exporting patch:\n"))
    seqno = 0
    for cset in revs:
        seqno += 1
        doexport(ui, repo, cset, seqno, total, revwidth, opts)
1216 1215
def forget(ui, repo, *pats, **opts):
    """don't add the specified files on the next commit

    Undo an 'hg add' scheduled for the next commit.
    """
    # 'forgotten' rather than 'forget': the original list shadowed this
    # function's own name (pychecker shadowing warning)
    forgotten = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # only files in the 'a' (scheduled for addition) state qualify
        if repo.dirstate.state(abs) == 'a':
            forgotten.append(abs)
            if ui.verbose or not exact:
                ui.status(_('forgetting %s\n') % ((pats and rel) or abs))
    repo.forget(forgotten)
1229 1228
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts['ignore_case']:
        reflags |= re.I
    regexp = re.compile(pattern, reflags)
    # with --print0, both field and record separators become NUL
    sep, eol = ':', '\n'
    if opts['print0']:
        sep = eol = '\0'

    # cache of filelog objects, keyed by file name
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # yield (lineno, match start col, match end col, line text) for
        # each regexp match in body
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            lend = body.find('\n', mend)
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
            begin = lend + 1

    class linestate(object):
        # a matched line; equality and hashing use only the line text,
        # so set differences between revisions detect match changes
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __eq__(self, other):
            return self.line == other.line
        def __hash__(self):
            return hash(self.line)

    # matches[rev][fn] maps linestate -> linestate for that file/revision
    matches = {}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, {})
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m[s] = s

    # NOTE(review): prev[fn] is read into 'r' in display() below, but
    # 'r' is never printed (cols uses str(rev)); str(r) may have been
    # intended -- confirm before changing
    prev = {}
    ucache = {}
    def display(fn, rev, states, prevstates):
        # print lines whose match status changed between the two states
        diff = list(sets.Set(states).symmetric_difference(sets.Set(prevstates)))
        diff.sort(lambda x, y: cmp(x.linenum, y.linenum))
        counts = {'-': 0, '+': 0}
        filerevmatches = {}
        for l in diff:
            if incrementing or not opts['all']:
                change = ((l in prevstates) and '-') or '+'
                r = rev
            else:
                change = ((l in states) and '-') or '+'
                r = prev[fn]
            cols = [fn, str(rev)]
            if opts['line_number']:
                cols.append(str(l.linenum))
            if opts['all']:
                cols.append(change)
            if opts['user']:
                cols.append(trimuser(ui, getchange(rev)[1], rev,
                                     ucache))
            if opts['files_with_matches']:
                # emit each (file, rev) pair at most once
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            counts[change] += 1
        return counts['+'], counts['-']

    fstate = {}
    skip = {}
    changeiter, getchange, matchfn = walkchangerevs(ui, repo, pats, opts)
    count = 0
    incrementing = False
    for st, rev, fns in changeiter:
        if st == 'window':
            # start of a new batch of revisions; here rev carries the
            # walk direction flag, not a revision number
            incrementing = rev
            matches.clear()
        elif st == 'add':
            # collect matches for this revision before display
            change = repo.changelog.read(repo.lookup(str(rev)))
            mf = repo.manifest.read(change[0])
            matches[rev] = {}
            for fn in fns:
                if fn in skip:
                    continue
                fstate.setdefault(fn, {})
                try:
                    grepbody(fn, rev, getfile(fn).read(mf[fn]))
                except KeyError:
                    # file not present in this revision's manifest
                    pass
        elif st == 'iter':
            # display phase for this revision
            states = matches[rev].items()
            states.sort()
            for fn, m in states:
                if fn in skip:
                    continue
                if incrementing or not opts['all'] or fstate[fn]:
                    pos, neg = display(fn, rev, m, fstate[fn])
                    count += pos + neg
                    if pos and not opts['all']:
                        # first match printed; ignore this file from now on
                        skip[fn] = True
                fstate[fn] = m
                prev[fn] = rev

    if not incrementing:
        # walking backwards: flush the remaining per-file state
        # (rev still holds the last value from the loop above)
        fstate = fstate.items()
        fstate.sort()
        for fn, state in fstate:
            if fn in skip:
                continue
            display(fn, rev, {}, state)
    return (count == 0 and 1) or 0
1369 1369
def heads(ui, repo, **opts):
    """show current repository heads

    Show all repository head changesets.

    Repository "heads" are changesets that don't have children
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.
    """
    # 'headnodes' rather than 'heads': the original local shadowed this
    # function's own name (pychecker shadowing warning)
    if opts['rev']:
        headnodes = repo.heads(repo.lookup(opts['rev']))
    else:
        headnodes = repo.heads()
    br = None
    if opts['branches']:
        # branch name information for display
        br = repo.branchlookup(headnodes)
    for n in headnodes:
        show_changeset(ui, repo, changenode=n, brinfo=br)
1388 1388
def identify(ui, repo):
    """print information about the working copy

    Print a short summary of the current state of the repo.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, followed by a list of tags for this revision.
    """
    parents = [p for p in repo.dirstate.parents() if p != nullid]
    if not parents:
        ui.write(_("unknown\n"))
        return

    # long hashes in verbose mode, short ones otherwise
    hexfunc = ui.verbose and hex or short
    modified, added, removed, deleted, unknown = repo.changes()
    dirty = (modified or added or removed or deleted) and "+" or ""
    output = ["%s%s" % ('+'.join([hexfunc(p) for p in parents]), dirty)]

    if not ui.quiet:
        # multiple tags for a single parent separated by '/'
        parenttags = ['/'.join(tags)
                      for tags in map(repo.nodetags, parents) if tags]
        # tags for multiple parents separated by ' + '
        if parenttags:
            output.append(' + '.join(parenttags))

    ui.write("%s\n" % ' '.join(output))
1418 1418
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    If a patch looks like a mail message (its first line starts with
    "From " or looks like an RFC822 header), it will not be applied
    unless the -f option is used. The importer neither parses nor
    discards mail headers, so use -f only to override the "mailness"
    safety check, not to import a real mail message.
    """
    patches = (patch1,) + patches

    if not opts['force']:
        # refuse to apply patches on top of local modifications
        modified, added, removed, deleted, unknown = repo.changes()
        if modified or added or removed or deleted:
            raise util.Abort(_("outstanding uncommitted changes"))

    d = opts["base"]
    strip = opts["strip"]

    # a line that looks like the start of a mail message
    mailre = re.compile(r'(?:From |[\w-]+:)')

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'(?:Index:[ \t]|diff[ \t]|RCS file: |' +
                        'retrieving revision [0-9]+(\.[0-9]+)*$|' +
                        '(---|\*\*\*)[ \t])')

    for patch in patches:
        ui.status(_("applying %s\n") % patch)
        pf = os.path.join(d, patch)

        # scan the header part of the patch file, collecting the commit
        # message and (for hg export output) the user name; the order of
        # the mailness check / diff-start check / hgpatch state matters
        message = []
        user = None
        hgpatch = False
        for line in file(pf):
            line = line.rstrip()
            if (not message and not hgpatch and
                   mailre.match(line) and not opts['force']):
                if len(line) > 35:
                    line = line[:32] + '...'
                raise util.Abort(_('first line looks like a '
                                   'mail header: ') + line)
            if diffre.match(line):
                # diff content starts here; stop reading the header
                break
            elif hgpatch:
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                    ui.debug(_('User: %s\n') % user)
                elif not line.startswith("# ") and line:
                    # first non-"# " line ends the hg header block
                    message.append(line)
                    hgpatch = False
            elif line == '# HG changeset patch':
                hgpatch = True
                message = []    # We may have collected garbage
            else:
                message.append(line)

        # make sure message isn't empty
        if not message:
            message = _("imported patch %s\n") % patch
        else:
            message = "%s\n" % '\n'.join(message)
        ui.debug(_('message:\n%s\n') % message)

        files = util.patch(strip, pf, ui)

        if len(files) > 0:
            addremove(ui, repo, *files)
        repo.commit(files, message, user)
1494 1494
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified repo or the default
    pull repo. These are the changesets that would be pulled if a pull
    was requested.

    Currently only local repositories are supported.
    """
    source = ui.expandpath(source, repo.root)
    other = hg.repository(ui, source)
    if not other.local():
        raise util.Abort(_("incoming doesn't work for remote repositories yet"))
    o = repo.findincoming(other)
    if not o:
        return
    # expand the incoming roots into the full list of new changesets
    nodes = other.changelog.nodesbetween(o)[0]
    if opts['newest_first']:
        nodes.reverse()
    for n in nodes:
        parents = [p for p in other.changelog.parents(n) if p != nullid]
        if opts['no_merges'] and len(parents) == 2:
            continue
        show_changeset(ui, other, changenode=n)
        if opts['patch']:
            prev = (parents and parents[0]) or nullid
            dodiff(ui, ui, other, prev, n)
            ui.write("\n")
1523 1523
def init(ui, dest="."):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.
    """
    # make sure the target directory exists before creating the repo in it
    if not os.path.exists(dest):
        os.mkdir(dest)
    hg.repository(ui, dest, create=1)
1535 1535
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the current directory and its
    subdirectories. To search an entire repository, move to the root
    of the repository.

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    # NUL-terminated records when --print0 is given
    end = opts['print0'] and '\0' or '\n'
    if opts['rev']:
        node = repo.lookup(opts['rev'])
    else:
        node = None

    for src, abspath, rel, exact in walk(repo, pats, opts, node=node,
                                         head='(?:.*/|)'):
        # without a revision, skip files unknown to the dirstate
        if not node and repo.dirstate.state(abspath) == '?':
            continue
        if opts['fullpath']:
            ui.write(os.path.join(repo.root, abspath), end)
        else:
            ui.write(((pats and rel) or abspath), end)
1569 1569
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire project.

    By default this command outputs: changeset id and hash, tags,
    non-trivial parents, user, date and time, and a summary for each
    commit. When the -v/--verbose switch is used, the list of changed
    files and full commit message is shown.
    """
    class dui(object):
        # Implement and delegate some ui protocol.  Save hunks of
        # output for later display in the desired order.
        def __init__(self, ui):
            self.ui = ui
            self.hunk = {}
        def bump(self, rev):
            # begin buffering output for a new revision
            self.rev = rev
            self.hunk[rev] = []
        def note(self, *args):
            if self.verbose:
                self.write(*args)
        def status(self, *args):
            if not self.quiet:
                self.write(*args)
        def write(self, *args):
            self.hunk[self.rev].append(args)
        def debug(self, *args):
            if self.debugflag:
                self.write(*args)
        def __getattr__(self, key):
            # anything else (verbose, quiet, debugflag, ...) is
            # delegated to the wrapped ui
            return getattr(self.ui, key)
    changeiter, getchange, matchfn = walkchangerevs(ui, repo, pats, opts)
    for st, rev, fns in changeiter:
        if st == 'window':
            # new batch of revisions: start a fresh output buffer
            du = dui(ui)
        elif st == 'add':
            # render this revision into the buffer
            du.bump(rev)
            changenode = repo.changelog.node(rev)
            parents = [p for p in repo.changelog.parents(changenode)
                       if p != nullid]
            if opts['no_merges'] and len(parents) == 2:
                continue
            if opts['only_merges'] and len(parents) != 2:
                continue

            br = None
            if opts['keyword']:
                # keyword filter on user, description and the first
                # twenty changed files
                changes = getchange(rev)
                miss = 0
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            if opts['branch']:
                br = repo.branchlookup([repo.changelog.node(rev)])

            show_changeset(du, repo, rev, brinfo=br)
            if opts['patch']:
                prev = (parents and parents[0]) or nullid
                dodiff(du, du, repo, prev, changenode, match=matchfn)
                du.write("\n\n")
        elif st == 'iter':
            # flush the buffered hunks in display order
            for args in du.hunk[rev]:
                ui.write(*args)
1640 1640
def manifest(ui, repo, rev=None):
    """output the latest or given revision of the project manifest

    Print a list of version controlled files for the given revision.

    The manifest is the list of files being version controlled. If no revision
    is given then the tip is used.
    """
    if rev:
        try:
            # assume all revision numbers are for changesets
            node = repo.lookup(rev)
            change = repo.changelog.read(node)
            node = change[0]
        except hg.RepoError:
            # fall back to a manifest revision
            node = repo.manifest.lookup(rev)
    else:
        node = repo.manifest.tip()
    mmap = repo.manifest.read(node)
    mflags = repo.manifest.readflags(node)
    fnames = mmap.keys()
    fnames.sort()

    for fname in fnames:
        ui.write("%40s %3s %s\n" % (hex(mmap[fname]),
                                    mflags[fname] and "755" or "644", fname))
1666 1666
def outgoing(ui, repo, dest="default-push", **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repo or the
    default push repo. These are the changesets that would be pushed
    if a push was requested.
    """
    dest = ui.expandpath(dest, repo.root)
    other = hg.repository(ui, dest)
    # expand the outgoing roots into the full list of changesets
    nodes = repo.changelog.nodesbetween(repo.findoutgoing(other))[0]
    if opts['newest_first']:
        nodes.reverse()
    for n in nodes:
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts['no_merges'] and len(parents) == 2:
            continue
        show_changeset(ui, repo, changenode=n)
        if opts['patch']:
            prev = (parents and parents[0]) or nullid
            dodiff(ui, ui, repo, prev, n)
            ui.write("\n")
1689 1689
def parents(ui, repo, rev=None, branch=None):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions.
    """
    if rev:
        nodes = repo.changelog.parents(repo.lookup(rev))
    else:
        nodes = repo.dirstate.parents()

    br = None
    if branch is not None:
        br = repo.branchlookup(nodes)
    for n in nodes:
        # skip the null parent of root changesets
        if n != nullid:
            show_changeset(ui, repo, changenode=n, brinfo=br)
1706 1706
def paths(ui, search=None):
    """show definition of symbolic path names

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    """
    try:
        # opening the repository is done only for its side effect of
        # loading .hg/hgrc into ui's configuration; binding the result
        # to an unused local was a pychecker warning
        hg.repository(ui=ui)
    except hg.RepoError:
        pass

    if search:
        for name, path in ui.configitems("paths"):
            if name == search:
                ui.write("%s\n" % path)
                return
        ui.warn(_("not found!\n"))
        return 1
    else:
        for name, path in ui.configitems("paths"):
            ui.write("%s = %s\n" % (name, path))
1731 1731
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Valid URLs are of the form:

      local/filesystem/path
      http://[user@]host[:port][/path]
      https://[user@]host[:port][/path]
      ssh://[user@]host[:port][/path]

    SSH requires an accessible shell account on the destination machine
    and a copy of hg in the remote path. With SSH, paths are relative
    to the remote user's home directory by default; use two slashes at
    the start of a path to specify it as relative to the filesystem root.
    """
    source = ui.expandpath(source, repo.root)
    ui.status(_('pulling from %s\n') % (source))

    # propagate command-line ssh settings into the config before
    # the remote repository object is created
    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, source)
    revs = None
    if opts['rev']:
        if not other.local():
            raise util.Abort(_("pull -r doesn't work for remote repositories yet"))
        revs = [other.lookup(rev) for rev in opts['rev']]
    r = repo.pull(other, heads=revs)
    if not r:
        if opts['update']:
            return update(ui, repo)
        else:
            ui.status(_("(run 'hg update' to get a working copy)\n"))

    return r
1775 1775
def push(ui, repo, dest="default-push", force=False, ssh=None, remotecmd=None):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates
    the client has forgotten to sync and merge before pushing.

    Valid URLs are of the form:

      local/filesystem/path
      ssh://[user@]host[:port][/path]

    SSH requires an accessible shell account on the destination
    machine and a copy of hg in the remote path.
    """
    # fix: the help text above said "indicates the the client" (doubled word)
    dest = ui.expandpath(dest, repo.root)
    ui.status('pushing to %s\n' % (dest))

    # let command-line options override the configured ssh commands
    if ssh:
        ui.setconfig("ui", "ssh", ssh)
    if remotecmd:
        ui.setconfig("ui", "remotecmd", remotecmd)

    other = hg.repository(ui, dest)
    r = repo.push(other, force)
    return r
1809 1809
1810 1810 def rawcommit(ui, repo, *flist, **rc):
1811 1811 """raw commit interface (DEPRECATED)
1812 1812
1813 1813 Lowlevel commit, for use in helper scripts.
1814 1814
1815 1815 This command is not intended to be used by normal users, as it is
1816 1816 primarily useful for importing from other SCMs.
1817 1817
1818 1818 This command is now deprecated and will be removed in a future
1819 1819 release, please use debugsetparents and commit instead.
1820 1820 """
1821 1821
1822 1822 ui.warn(_("(the rawcommit command is deprecated)\n"))
1823 1823
1824 1824 message = rc['message']
1825 1825 if not message and rc['logfile']:
1826 1826 try:
1827 1827 message = open(rc['logfile']).read()
1828 1828 except IOError:
1829 1829 pass
1830 1830 if not message and not rc['logfile']:
1831 1831 raise util.Abort(_("missing commit message"))
1832 1832
1833 1833 files = relpath(repo, list(flist))
1834 1834 if rc['files']:
1835 1835 files += open(rc['files']).read().splitlines()
1836 1836
1837 1837 rc['parent'] = map(repo.lookup, rc['parent'])
1838 1838
1839 1839 try:
1840 1840 repo.rawcommit(files, message, rc['user'], rc['date'], *rc['parent'])
1841 1841 except ValueError, inst:
1842 1842 raise util.Abort(str(inst))
1843 1843
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # only verify when the rollback actually did something
    if not repo.recover():
        return False
    return repo.verify()
1855 1855
def remove(ui, repo, pat, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This command schedules the files to be removed at the next commit.
    This only removes files from the current branch, not from the
    entire project history. If the files still exist in the working
    directory, they will be deleted from it.
    """
    def removable(abs, rel, exact):
        # a file may only be removed if it is clean and tracked
        modified, added, removed, deleted, unknown = repo.changes(files=[abs])
        if modified:
            reason = _('is modified')
        elif added:
            reason = _('has been marked for add')
        elif unknown:
            reason = _('is not managed')
        else:
            return True
        # only complain about files the user named explicitly
        if exact:
            ui.warn(_('not removing %s: file %s\n') % (rel, reason))

    names = []
    for src, abs, rel, exact in walk(repo, (pat,) + pats, opts):
        if removable(abs, rel, exact):
            if ui.verbose or not exact:
                ui.status(_('removing %s\n') % rel)
            names.append(abs)
    repo.remove(names, unlink=True)
1887 1887
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record rename files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    # first record the copies, then schedule the sources for removal
    errs, copied = docopy(ui, repo, pats, opts)
    deleting = []
    for abs, rel, exact in copied:
        if ui.verbose or not exact:
            ui.status(_('removing %s\n') % rel)
        deleting.append(abs)
    repo.remove(deleting, unlink=True)
    return errs
1913 1913
def revert(ui, repo, *pats, **opts):
    """revert modified files or dirs back to their unmodified states

    Revert any uncommitted modifications made to the named files or
    directories. This restores the contents of the affected files to
    an unmodified state.

    If a file has been deleted, it is recreated. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.

    If no arguments are given, all files in the repository are reverted.
    """
    # revert against the requested revision, or the first parent
    if opts['rev']:
        node = repo.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]

    files, choose, anypats = matchpats(repo, pats, opts)
    modified, added, removed, deleted, unknown = repo.changes(match=choose)
    # drop scheduled adds and resurrect scheduled/accidental deletions
    repo.forget(added)
    repo.undelete(removed + deleted)

    return repo.update(node, False, True, choose, False)
1937 1937
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
1944 1944
1945 1945 def serve(ui, repo, **opts):
1946 1946 """export the repository via HTTP
1947 1947
1948 1948 Start a local HTTP repository browser and pull server.
1949 1949
1950 1950 By default, the server logs accesses to stdout and errors to
1951 1951 stderr. Use the "-A" and "-E" options to log to files.
1952 1952 """
1953 1953
1954 1954 if opts["stdio"]:
1955 1955 fin, fout = sys.stdin, sys.stdout
1956 1956 sys.stdout = sys.stderr
1957 1957
1958 1958 # Prevent insertion/deletion of CRs
1959 1959 util.set_binary(fin)
1960 1960 util.set_binary(fout)
1961 1961
1962 1962 def getarg():
1963 1963 argline = fin.readline()[:-1]
1964 1964 arg, l = argline.split()
1965 1965 val = fin.read(int(l))
1966 1966 return arg, val
1967 1967 def respond(v):
1968 1968 fout.write("%d\n" % len(v))
1969 1969 fout.write(v)
1970 1970 fout.flush()
1971 1971
1972 1972 lock = None
1973 1973
1974 1974 while 1:
1975 1975 cmd = fin.readline()[:-1]
1976 1976 if cmd == '':
1977 1977 return
1978 1978 if cmd == "heads":
1979 1979 h = repo.heads()
1980 1980 respond(" ".join(map(hex, h)) + "\n")
1981 1981 if cmd == "lock":
1982 1982 lock = repo.lock()
1983 1983 respond("")
1984 1984 if cmd == "unlock":
1985 1985 if lock:
1986 1986 lock.release()
1987 1987 lock = None
1988 1988 respond("")
1989 1989 elif cmd == "branches":
1990 1990 arg, nodes = getarg()
1991 1991 nodes = map(bin, nodes.split(" "))
1992 1992 r = []
1993 1993 for b in repo.branches(nodes):
1994 1994 r.append(" ".join(map(hex, b)) + "\n")
1995 1995 respond("".join(r))
1996 1996 elif cmd == "between":
1997 1997 arg, pairs = getarg()
1998 1998 pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
1999 1999 r = []
2000 2000 for b in repo.between(pairs):
2001 2001 r.append(" ".join(map(hex, b)) + "\n")
2002 2002 respond("".join(r))
2003 2003 elif cmd == "changegroup":
2004 2004 nodes = []
2005 2005 arg, roots = getarg()
2006 2006 nodes = map(bin, roots.split(" "))
2007 2007
2008 2008 cg = repo.changegroup(nodes, 'serve')
2009 2009 while 1:
2010 2010 d = cg.read(4096)
2011 2011 if not d:
2012 2012 break
2013 2013 fout.write(d)
2014 2014
2015 2015 fout.flush()
2016 2016
2017 2017 elif cmd == "addchangegroup":
2018 2018 if not lock:
2019 2019 respond("not locked")
2020 2020 continue
2021 2021 respond("")
2022 2022
2023 2023 r = repo.addchangegroup(fin)
2024 2024 respond("")
2025 2025
2026 2026 optlist = "name templates style address port ipv6 accesslog errorlog"
2027 2027 for o in optlist.split():
2028 2028 if opts[o]:
2029 2029 ui.setconfig("web", o, opts[o])
2030 2030
2031 2031 if opts['daemon'] and not opts['daemon_pipefds']:
2032 2032 rfd, wfd = os.pipe()
2033 2033 args = sys.argv[:]
2034 2034 args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
2035 2035 pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
2036 2036 args[0], args)
2037 2037 os.close(wfd)
2038 2038 os.read(rfd, 1)
2039 2039 os._exit(0)
2040 2040
2041 2041 try:
2042 2042 httpd = hgweb.create_server(repo)
2043 2043 except socket.error, inst:
2044 2044 raise util.Abort(_('cannot start server: ') + inst.args[1])
2045 2045
2046 2046 if ui.verbose:
2047 2047 addr, port = httpd.socket.getsockname()
2048 2048 if addr == '0.0.0.0':
2049 2049 addr = socket.gethostname()
2050 2050 else:
2051 2051 try:
2052 2052 addr = socket.gethostbyaddr(addr)[0]
2053 2053 except socket.error:
2054 2054 pass
2055 2055 if port != 80:
2056 2056 ui.status(_('listening at http://%s:%d/\n') % (addr, port))
2057 2057 else:
2058 2058 ui.status(_('listening at http://%s/\n') % addr)
2059 2059
2060 2060 if opts['pid_file']:
2061 2061 fp = open(opts['pid_file'], 'w')
2062 2062 fp.write(str(os.getpid()))
2063 2063 fp.close()
2064 2064
2065 2065 if opts['daemon_pipefds']:
2066 2066 rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
2067 2067 os.close(rfd)
2068 2068 os.write(wfd, 'y')
2069 2069 os.close(wfd)
2070 2070 sys.stdout.flush()
2071 2071 sys.stderr.flush()
2072 2072 fd = os.open(util.nulldev, os.O_RDWR)
2073 2073 if fd != 0: os.dup2(fd, 0)
2074 2074 if fd != 1: os.dup2(fd, 1)
2075 2075 if fd != 2: os.dup2(fd, 2)
2076 2076 if fd not in (0, 1, 2): os.close(fd)
2077 2077
2078 2078 httpd.serve_forever()
2079 2079
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show changed files in the repository. If names are
    given, only files that match are shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    ! = deleted, but still tracked
    ? = not tracked
    """

    files, matchfn, anypats = matchpats(repo, pats, opts)
    cwd = (pats and repo.getcwd()) or ''
    modified, added, removed, deleted, unknown = [
        [util.pathto(cwd, x) for x in n]
        for n in repo.changes(files=files, match=matchfn)]

    # the first element doubles as the opts[] key below, so it must be
    # the untranslated option name (wrapping it in _() broke the lookup
    # as soon as the string was translated; it is never displayed)
    changetypes = [('modified', 'M', modified),
                   ('added', 'A', added),
                   ('removed', 'R', removed),
                   ('deleted', '!', deleted),
                   ('unknown', '?', unknown)]

    end = opts['print0'] and '\0' or '\n'

    # show only the requested kinds, or everything when none requested
    for opt, char, changes in ([ct for ct in changetypes if opts[ct[0]]]
                               or changetypes):
        if opts['no_status']:
            format = "%%s%s" % end
        else:
            format = "%s %%s%s" % (char, end)

        for f in changes:
            ui.write(format % f)
2117 2117
def tag(ui, repo, name, rev_=None, **opts):
    """add a tag for the current tip or a given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revision, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the tip is used.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).
    """
    if name == "tip":
        raise util.Abort(_("the name 'tip' is reserved"))
    # rev_ is the deprecated positional REV argument; it may not be
    # combined with the -r option
    if rev_ is not None:
        ui.warn(_("use of 'hg tag NAME [REV]' is deprecated, "
                  "please use 'hg tag [-r REV] NAME' instead\n"))
        if opts['rev']:
            raise util.Abort(_("use only one form to specify the revision"))
    if opts['rev']:
        rev_ = opts['rev']
    # r is the hex node the tag will point at (tip when no rev given)
    if rev_:
        r = hex(repo.lookup(rev_))
    else:
        r = hex(repo.changelog.tip())

    # tag names must not contain the revision-range separator or newlines,
    # since .hgtags/localtags are newline-separated "<node> <name>" lines
    disallowed = (revrangesep, '\r', '\n')
    for c in disallowed:
        if name.find(c) >= 0:
            raise util.Abort(_("%s cannot be used in a tag name") % repr(c))

    # run the pretag hook before anything is written; throw=True aborts
    # the whole command if the hook fails
    repo.hook('pretag', throw=True, node=r, tag=name,
              local=int(not not opts['local']))

    if opts['local']:
        # local tags are only appended to .hg/localtags; no commit needed
        repo.opener("localtags", "a").write("%s %s\n" % (r, name))
        repo.hook('tag', node=r, tag=name, local=1)
        return

    # refuse to clobber uncommitted .hgtags edits; repo.changes() returns
    # several file lists, the tag file may appear in any of them
    for x in repo.changes():
        if ".hgtags" in x:
            raise util.Abort(_("working copy of .hgtags is changed "
                               "(please commit .hgtags manually)"))

    repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
    if repo.dirstate.state(".hgtags") == '?':
        repo.add([".hgtags"])

    message = (opts['message'] or
               _("Added tag %s for changeset %s") % (name, r))
    try:
        # commit only the tag file, then fire the (non-throwing) tag hook
        repo.commit([".hgtags"], message, opts['user'], opts['date'])
        repo.hook('tag', node=r, tag=name, local=0)
    except ValueError, inst:
        raise util.Abort(str(inst))
2178 2178
def tags(ui, repo):
    """list repository tags

    List the repository tags.

    This lists both regular and local tags.
    """

    # tagslist() is sorted; show newest first
    taglist = repo.tagslist()
    taglist.reverse()
    for t, n in taglist:
        try:
            r = "%5d:%s" % (repo.changelog.rev(n), hex(n))
        except KeyError:
            # the tag points at a node we do not have
            r = " ?:?"
        ui.write("%-30s %s\n" % (t, r))
2195 2195
def tip(ui, repo, **opts):
    """show the tip revision

    Show the tip revision.
    """
    tipnode = repo.changelog.tip()
    show_changeset(ui, repo, changenode=tipnode)
    if opts['patch']:
        # diff against the tip's first parent
        p1 = repo.changelog.parents(tipnode)[0]
        dodiff(ui, ui, repo, p1, tipnode)
2205 2205
def unbundle(ui, repo, fname, **opts):
    """apply a changegroup file

    Apply a compressed changegroup file generated by the bundle
    command.
    """
    f = urllib.urlopen(fname)

    # bundles start with a fixed magic/version marker
    header = f.read(4)
    if header != "HG10":
        raise util.Abort(_("%s: not a Mercurial bundle file") % fname)

    def decompressed(chunks):
        # stream-decompress the bz2 payload chunk by chunk
        zd = bz2.BZ2Decompressor()
        for chunk in chunks:
            yield zd.decompress(chunk)

    gen = decompressed(util.filechunkiter(f, 4096))
    if repo.addchangegroup(util.chunkbuffer(gen)):
        return 1

    if opts['update']:
        return update(ui, repo)
    ui.status(_("(run 'hg update' to get a working copy)\n"))
2230 2230
def undo(ui, repo):
    """undo the last commit or pull

    Roll back the last pull or commit transaction on the
    repository, restoring the project to its earlier state.

    This command should be used with care. There is only one level of
    undo and there is no redo.

    This command is not intended for use on public repositories. Once
    a change is visible for pull by other users, undoing it locally is
    ineffective.
    """
    # all of the work is delegated to the repository object
    repo.undo()
2245 2245
def update(ui, repo, node=None, merge=False, clean=False, force=None,
           branch=None):
    """update or merge working directory

    Update the working directory to the specified revision.

    If there are no outstanding changes in the working directory and
    there is a linear relationship between the current version and the
    requested version, the result is the requested version.

    Otherwise the result is a merge between the contents of the
    current working directory and the requested version. Files that
    changed between either parent are marked as changed for the next
    commit and a commit must be performed before any further updates
    are allowed.

    By default, update will refuse to run if doing so would require
    merging or discarding local changes.
    """
    if branch:
        # resolve a branch name to a head node
        br = repo.branchlookup(branch=branch)
        found = [x for x in br if branch in br[x]]
        if len(found) > 1:
            # ambiguous: show the candidates and give up
            ui.warn(_("Found multiple heads for %s\n") % branch)
            for x in found:
                show_changeset(ui, repo, changenode=x, brinfo=br)
            return 1
        if len(found) == 1:
            node = found[0]
            ui.warn(_("Using head %s for branch %s\n") % (short(node), branch))
        else:
            ui.warn(_("branch %s not found\n") % (branch))
            return 1
    else:
        # default to tip when no revision was requested
        if node:
            node = repo.lookup(node)
        else:
            node = repo.changelog.tip()
    return repo.update(node, allow=merge, force=clean, forcemerge=force)
2285 2285
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # the actual checking lives in the repository object
    return repo.verify()
2297 2297
2298 2298 # Command options and aliases are listed here, alphabetically
2299 2299
# The command table maps "name|alias|..." (a leading "^" marks commands
# listed in the short help) to a (function, options, synopsis) triple.
# Each option entry is a (short flag, long name, default, description)
# tuple as consumed by fancyopts.
table = {
    "^add":
        (add,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg add [OPTION]... [FILE]...')),
    "addremove":
        (addremove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg addremove [OPTION]... [FILE]...')),
    "^annotate":
        (annotate,
         [('r', 'rev', '', _('annotate the specified revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('u', 'user', None, _('list the author')),
          ('d', 'date', None, _('list the date')),
          ('n', 'number', None, _('list the revision number (default)')),
          ('c', 'changeset', None, _('list the changeset')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg annotate [OPTION]... FILE...')),
    "bundle":
        (bundle,
         [],
         _('hg bundle FILE DEST')),
    "cat":
        (cat,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('o', 'output', '', _('print output to file with formatted name')),
          ('r', 'rev', '', _('print the given revision'))],
         _('hg cat [OPTION]... FILE...')),
    "^clone":
        (clone,
         [('U', 'noupdate', None, _('do not update the new working directory')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
          ('r', 'rev', [],
           _('a changeset you would like to have after cloning')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg clone [OPTION]... SOURCE [DEST]')),
    "^commit|ci":
        (commit,
         [('A', 'addremove', None, _('run addremove during commit')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('m', 'message', '', _('use <text> as commit message')),
          ('l', 'logfile', '', _('read the commit message from <file>')),
          ('d', 'date', '', _('record datecode as commit date')),
          ('u', 'user', '', _('record user as commiter'))],
         _('hg commit [OPTION]... [FILE]...')),
    "copy|cp":
        (copy,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('A', 'after', None, _('record a copy that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file'))],
         _('hg copy [OPTION]... [SOURCE]... DEST')),
    # debug commands are intentionally undocumented in the overall help
    "debugancestor": (debugancestor, [], _('debugancestor INDEX REV1 REV2')),
    "debugcheckstate": (debugcheckstate, [], _('debugcheckstate')),
    "debugconfig": (debugconfig, [], _('debugconfig')),
    "debugsetparents": (debugsetparents, [], _('debugsetparents REV1 [REV2]')),
    "debugstate": (debugstate, [], _('debugstate')),
    "debugdata": (debugdata, [], _('debugdata FILE REV')),
    "debugindex": (debugindex, [], _('debugindex FILE')),
    "debugindexdot": (debugindexdot, [], _('debugindexdot FILE')),
    "debugrename": (debugrename, [], _('debugrename FILE [REV]')),
    "debugwalk":
        (debugwalk,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('debugwalk [OPTION]... [FILE]...')),
    "^diff":
        (diff,
         [('r', 'rev', [], _('revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('p', 'show-function', None,
           _('show which function each change is in')),
          ('w', 'ignore-all-space', None,
           _('ignore white space when comparing lines')),
          ('X', 'exclude', [],
           _('exclude names matching the given patterns'))],
         _('hg diff [-a] [-I] [-X] [-r REV1 [-r REV2]] [FILE]...')),
    "^export":
        (export,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('a', 'text', None, _('treat all files as text')),
          ('', 'switch-parent', None, _('diff against the second parent'))],
         _('hg export [-a] [-o OUTFILE] REV...')),
    "forget":
        (forget,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg forget [OPTION]... FILE...')),
    "grep":
        (grep,
         [('0', 'print0', None, _('end fields with NUL')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('', 'all', None, _('print all revisions that match')),
          ('i', 'ignore-case', None, _('ignore case when matching')),
          ('l', 'files-with-matches', None,
           _('print only filenames and revs that match')),
          ('n', 'line-number', None, _('print matching line numbers')),
          ('r', 'rev', [], _('search in given revision range')),
          ('u', 'user', None, _('print user who committed change'))],
         _('hg grep [OPTION]... PATTERN [FILE]...')),
    "heads":
        (heads,
         [('b', 'branches', None, _('find branch info')),
          ('r', 'rev', '', _('show only heads which are descendants of rev'))],
         _('hg heads [-b] [-r <rev>]')),
    "help": (help_, [], _('hg help [COMMAND]')),
    "identify|id": (identify, [], _('hg identify')),
    "import|patch":
        (import_,
         [('p', 'strip', 1,
           _('directory strip option for patch. This has the same\n') +
           _('meaning as the corresponding patch option')),
          ('f', 'force', None,
           _('skip check for outstanding uncommitted changes')),
          ('b', 'base', '', _('base path'))],
         _('hg import [-f] [-p NUM] [-b BASE] PATCH...')),
    "incoming|in": (incoming,
         [('M', 'no-merges', None, _('do not show merges')),
          ('p', 'patch', None, _('show patch')),
          ('n', 'newest-first', None, _('show newest record first'))],
         _('hg incoming [-p] [-n] [-M] [SOURCE]')),
    "^init": (init, [], _('hg init [DEST]')),
    "locate":
        (locate,
         [('r', 'rev', '', _('search the repository as it stood at rev')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('f', 'fullpath', None,
           _('print complete paths from the filesystem root')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg locate [OPTION]... [PATTERN]...')),
    "^log|history":
        (log,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('b', 'branch', None, _('show branches')),
          ('k', 'keyword', [], _('search for a keyword')),
          ('r', 'rev', [], _('show the specified revision or range')),
          ('M', 'no-merges', None, _('do not show merges')),
          ('m', 'only-merges', None, _('show only merges')),
          ('p', 'patch', None, _('show patch'))],
         _('hg log [-I] [-X] [-r REV]... [-p] [FILE]')),
    "manifest": (manifest, [], _('hg manifest [REV]')),
    "outgoing|out": (outgoing,
         [('M', 'no-merges', None, _('do not show merges')),
          ('p', 'patch', None, _('show patch')),
          ('n', 'newest-first', None, _('show newest record first'))],
         _('hg outgoing [-p] [-n] [-M] [DEST]')),
    "^parents":
        (parents,
         [('b', 'branch', None, _('show branches'))],
         _('hg parents [-b] [REV]')),
    "paths": (paths, [], _('hg paths [NAME]')),
    "^pull":
        (pull,
         [('u', 'update', None,
           _('update the working directory to tip after pull')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('r', 'rev', [], _('a specific revision you would like to pull')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg pull [-u] [-e FILE] [-r rev] [--remotecmd FILE] [SOURCE]')),
    "^push":
        (push,
         [('f', 'force', None, _('force push')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg push [-f] [-e FILE] [--remotecmd FILE] [DEST]')),
    "rawcommit":
        (rawcommit,
         [('p', 'parent', [], _('parent')),
          ('d', 'date', '', _('date code')),
          ('u', 'user', '', _('user')),
          ('F', 'files', '', _('file list')),
          ('m', 'message', '', _('commit message')),
          ('l', 'logfile', '', _('commit message file'))],
         _('hg rawcommit [OPTION]... [FILE]...')),
    "recover": (recover, [], _('hg recover')),
    "^remove|rm":
        (remove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg remove [OPTION]... FILE...')),
    "rename|mv":
        (rename,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('A', 'after', None, _('record a rename that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file'))],
         _('hg rename [OPTION]... [SOURCE]... DEST')),
    "^revert":
        (revert,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns')),
          ('r', 'rev', '', _('revision to revert to'))],
         _('hg revert [-n] [-r REV] [NAME]...')),
    "root": (root, [], _('hg root')),
    "^serve":
        (serve,
         [('A', 'accesslog', '', _('name of access log file to write to')),
          ('d', 'daemon', None, _('run server in background')),
          ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
          ('E', 'errorlog', '', _('name of error log file to write to')),
          ('p', 'port', 0, _('port to use (default: 8000)')),
          ('a', 'address', '', _('address to use')),
          ('n', 'name', '',
           _('name to show in web pages (default: working dir)')),
          ('', 'pid-file', '', _('name of file to write process ID to')),
          ('', 'stdio', None, _('for remote clients')),
          ('t', 'templates', '', _('web templates to use')),
          ('', 'style', '', _('template style to use')),
          ('6', 'ipv6', None, _('use IPv6 in addition to IPv4'))],
         _('hg serve [OPTION]...')),
    "^status|st":
        (status,
         [('m', 'modified', None, _('show only modified files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files')),
          ('d', 'deleted', None, _('show only deleted (but tracked) files')),
          ('u', 'unknown', None, _('show only unknown (not tracked) files')),
          ('n', 'no-status', None, _('hide status prefix')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg status [OPTION]... [FILE]...')),
    "tag":
        (tag,
         [('l', 'local', None, _('make the tag local')),
          ('m', 'message', '', _('message for tag commit log entry')),
          ('d', 'date', '', _('record datecode as commit date')),
          ('u', 'user', '', _('record user as commiter')),
          ('r', 'rev', '', _('revision to tag'))],
         _('hg tag [-r REV] [OPTION]... NAME')),
    "tags": (tags, [], _('hg tags')),
    "tip": (tip, [('p', 'patch', None, _('show patch'))], _('hg tip')),
    "unbundle":
        (unbundle,
         [('u', 'update', None,
           _('update the working directory to tip after unbundle'))],
         _('hg unbundle [-u] FILE')),
    "undo": (undo, [], _('hg undo')),
    "^update|up|checkout|co":
        (update,
         [('b', 'branch', '', _('checkout the head of a specific branch')),
          ('m', 'merge', None, _('allow merging of branches')),
          ('C', 'clean', None, _('overwrite locally modified files')),
          ('f', 'force', None, _('force a merge with outstanding changes'))],
         _('hg update [-b TAG] [-m] [-C] [-f] [REV]')),
    "verify": (verify, [], _('hg verify')),
    "version": (show_version, [], _('hg version')),
}
2566 2566
# Options accepted by every hg command; parse() merges these into each
# command's own option table and separates them back out afterwards.
globalopts = [
    ('R', 'repository', '', _('repository root directory')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]
2582 2582
# Space-separated list of commands that may run without a repository.
norepo = ("clone init version help debugancestor debugconfig debugdata"
          " debugindex debugindexdot paths")
2585 2585
def find(cmd):
    """Return (aliases, command table entry) for a command string.

    An exact alias match wins outright; otherwise a unique prefix of an
    alias is accepted.  Raises AmbiguousCommand when the prefix matches
    more than one command and UnknownCommand when it matches none.
    """
    prefix_match = None
    matches = 0
    for entry in table.keys():
        aliases = entry.lstrip("^").split("|")
        if cmd in aliases:
            return aliases, table[entry]
        for alias in aliases:
            if alias.startswith(cmd):
                # count at most one prefix hit per table entry
                matches += 1
                prefix_match = aliases, table[entry]
                break

    if matches > 1:
        raise AmbiguousCommand(cmd)

    if prefix_match:
        return prefix_match

    raise UnknownCommand(cmd)
2607 2607
class SignalInterrupt(Exception):
    """Raised by the signal handler on SIGTERM and SIGHUP."""
2610 2610
def catchterm(*args):
    """Signal handler: turn a termination signal into an exception."""
    raise SignalInterrupt
2613 2613
def run():
    """Command-line entry point: dispatch sys.argv and exit with its status."""
    sys.exit(dispatch(sys.argv[1:]))
2616 2616
class ParseError(Exception):
    """Raised on errors while parsing the command line."""
2619 2619
2620 2620 def parse(ui, args):
2621 2621 options = {}
2622 2622 cmdoptions = {}
2623 2623
2624 2624 try:
2625 2625 args = fancyopts.fancyopts(args, globalopts, options)
2626 2626 except fancyopts.getopt.GetoptError, inst:
2627 2627 raise ParseError(None, inst)
2628 2628
2629 2629 if args:
2630 2630 cmd, args = args[0], args[1:]
2631 2631 aliases, i = find(cmd)
2632 2632 cmd = aliases[0]
2633 2633 defaults = ui.config("defaults", cmd)
2634 2634 if defaults:
2635 2635 args = defaults.split() + args
2636 2636 c = list(i[1])
2637 2637 else:
2638 2638 cmd = None
2639 2639 c = []
2640 2640
2641 2641 # combine global options into local
2642 2642 for o in globalopts:
2643 2643 c.append((o[0], o[1], options[o[1]], o[3]))
2644 2644
2645 2645 try:
2646 2646 args = fancyopts.fancyopts(args, c, cmdoptions)
2647 2647 except fancyopts.getopt.GetoptError, inst:
2648 2648 raise ParseError(cmd, inst)
2649 2649
2650 2650 # separate global options back out
2651 2651 for o in globalopts:
2652 2652 n = o[1]
2653 2653 options[n] = cmdoptions[n]
2654 2654 del cmdoptions[n]
2655 2655
2656 2656 return (cmd, cmd and i[0] or None, args, options, cmdoptions)
2657 2657
2658 2658 def dispatch(args):
2659 2659 signal.signal(signal.SIGTERM, catchterm)
2660 2660 try:
2661 2661 signal.signal(signal.SIGHUP, catchterm)
2662 2662 except AttributeError:
2663 2663 pass
2664 2664
2665 2665 try:
2666 2666 u = ui.ui()
2667 2667 except util.Abort, inst:
2668 2668 sys.stderr.write(_("abort: %s\n") % inst)
2669 2669 sys.exit(1)
2670 2670
2671 2671 external = []
2672 2672 for x in u.extensions():
2673 2673 def on_exception(exc, inst):
2674 2674 u.warn(_("*** failed to import extension %s\n") % x[1])
2675 2675 u.warn("%s\n" % inst)
2676 2676 if "--traceback" in sys.argv[1:]:
2677 2677 traceback.print_exc()
2678 2678 if x[1]:
2679 2679 try:
2680 2680 mod = imp.load_source(x[0], x[1])
2681 2681 except Exception, inst:
2682 2682 on_exception(Exception, inst)
2683 2683 continue
2684 2684 else:
2685 2685 def importh(name):
2686 2686 mod = __import__(name)
2687 2687 components = name.split('.')
2688 2688 for comp in components[1:]:
2689 2689 mod = getattr(mod, comp)
2690 2690 return mod
2691 2691 try:
2692 2692 mod = importh(x[0])
2693 2693 except Exception, inst:
2694 2694 on_exception(Exception, inst)
2695 2695 continue
2696 2696
2697 2697 external.append(mod)
2698 2698 for x in external:
2699 2699 cmdtable = getattr(x, 'cmdtable', {})
2700 2700 for t in cmdtable:
2701 2701 if t in table:
2702 2702 u.warn(_("module %s overrides %s\n") % (x.__name__, t))
2703 2703 table.update(cmdtable)
2704 2704
2705 2705 try:
2706 2706 cmd, func, args, options, cmdoptions = parse(u, args)
2707 2707 except ParseError, inst:
2708 2708 if inst.args[0]:
2709 2709 u.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
2710 2710 help_(u, inst.args[0])
2711 2711 else:
2712 2712 u.warn(_("hg: %s\n") % inst.args[1])
2713 2713 help_(u, 'shortlist')
2714 2714 sys.exit(-1)
2715 2715 except AmbiguousCommand, inst:
2716 2716 u.warn(_("hg: command '%s' is ambiguous.\n") % inst.args[0])
2717 2717 sys.exit(1)
2718 2718 except UnknownCommand, inst:
2719 2719 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
2720 2720 help_(u, 'shortlist')
2721 2721 sys.exit(1)
2722 2722
2723 2723 if options["time"]:
2724 2724 def get_times():
2725 2725 t = os.times()
2726 2726 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
2727 2727 t = (t[0], t[1], t[2], t[3], time.clock())
2728 2728 return t
2729 2729 s = get_times()
2730 2730 def print_time():
2731 2731 t = get_times()
2732 2732 u.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
2733 2733 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
2734 2734 atexit.register(print_time)
2735 2735
2736 2736 u.updateopts(options["verbose"], options["debug"], options["quiet"],
2737 2737 not options["noninteractive"])
2738 2738
2739 2739 # enter the debugger before command execution
2740 2740 if options['debugger']:
2741 2741 pdb.set_trace()
2742 2742
2743 2743 try:
2744 2744 try:
2745 2745 if options['help']:
2746 2746 help_(u, cmd, options['version'])
2747 2747 sys.exit(0)
2748 2748 elif options['version']:
2749 2749 show_version(u)
2750 2750 sys.exit(0)
2751 2751 elif not cmd:
2752 2752 help_(u, 'shortlist')
2753 2753 sys.exit(0)
2754 2754
2755 2755 if options['cwd']:
2756 2756 try:
2757 2757 os.chdir(options['cwd'])
2758 2758 except OSError, inst:
2759 2759 raise util.Abort('%s: %s' %
2760 2760 (options['cwd'], inst.strerror))
2761 2761
2762 2762 if cmd not in norepo.split():
2763 2763 path = options["repository"] or ""
2764 2764 repo = hg.repository(ui=u, path=path)
2765 2765 for x in external:
2766 2766 if hasattr(x, 'reposetup'):
2767 2767 x.reposetup(u, repo)
2768 2768 d = lambda: func(u, repo, *args, **cmdoptions)
2769 2769 else:
2770 2770 d = lambda: func(u, *args, **cmdoptions)
2771 2771
2772 2772 if options['profile']:
2773 2773 import hotshot, hotshot.stats
2774 2774 prof = hotshot.Profile("hg.prof")
2775 2775 r = prof.runcall(d)
2776 2776 prof.close()
2777 2777 stats = hotshot.stats.load("hg.prof")
2778 2778 stats.strip_dirs()
2779 2779 stats.sort_stats('time', 'calls')
2780 2780 stats.print_stats(40)
2781 2781 return r
2782 2782 else:
2783 2783 return d()
2784 2784 except:
2785 2785 # enter the debugger when we hit an exception
2786 2786 if options['debugger']:
2787 2787 pdb.post_mortem(sys.exc_info()[2])
2788 2788 if options['traceback']:
2789 2789 traceback.print_exc()
2790 2790 raise
2791 2791 except hg.RepoError, inst:
2792 2792 u.warn(_("abort: "), inst, "!\n")
2793 2793 except revlog.RevlogError, inst:
2794 2794 u.warn(_("abort: "), inst, "!\n")
2795 2795 except SignalInterrupt:
2796 2796 u.warn(_("killed!\n"))
2797 2797 except KeyboardInterrupt:
2798 2798 try:
2799 2799 u.warn(_("interrupted!\n"))
2800 2800 except IOError, inst:
2801 2801 if inst.errno == errno.EPIPE:
2802 2802 if u.debugflag:
2803 2803 u.warn(_("\nbroken pipe\n"))
2804 2804 else:
2805 2805 raise
2806 2806 except IOError, inst:
2807 2807 if hasattr(inst, "code"):
2808 2808 u.warn(_("abort: %s\n") % inst)
2809 2809 elif hasattr(inst, "reason"):
2810 2810 u.warn(_("abort: error: %s\n") % inst.reason[1])
2811 2811 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
2812 2812 if u.debugflag:
2813 2813 u.warn(_("broken pipe\n"))
2814 2814 elif getattr(inst, "strerror", None):
2815 2815 if getattr(inst, "filename", None):
2816 2816 u.warn(_("abort: %s - %s\n") % (inst.strerror, inst.filename))
2817 2817 else:
2818 2818 u.warn(_("abort: %s\n") % inst.strerror)
2819 2819 else:
2820 2820 raise
2821 2821 except OSError, inst:
2822 2822 if hasattr(inst, "filename"):
2823 2823 u.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
2824 2824 else:
2825 2825 u.warn(_("abort: %s\n") % inst.strerror)
2826 2826 except util.Abort, inst:
2827 2827 u.warn(_('abort: '), inst.args[0] % inst.args[1:], '\n')
2828 2828 sys.exit(1)
2829 2829 except TypeError, inst:
2830 2830 # was this an argument error?
2831 2831 tb = traceback.extract_tb(sys.exc_info()[2])
2832 2832 if len(tb) > 2: # no
2833 2833 raise
2834 2834 u.debug(inst, "\n")
2835 2835 u.warn(_("%s: invalid arguments\n") % cmd)
2836 2836 help_(u, cmd)
2837 2837 except AmbiguousCommand, inst:
2838 2838 u.warn(_("hg: command '%s' is ambiguous.\n") % inst.args[0])
2839 2839 help_(u, 'shortlist')
2840 2840 except UnknownCommand, inst:
2841 2841 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
2842 2842 help_(u, 'shortlist')
2843 2843 except SystemExit:
2844 2844 # don't catch this in the catch-all below
2845 2845 raise
2846 2846 except:
2847 2847 u.warn(_("** unknown exception encountered, details follow\n"))
2848 2848 u.warn(_("** report bug details to mercurial@selenic.com\n"))
2849 2849 u.warn(_("** Mercurial Distributed SCM (version %s)\n")
2850 2850 % version.get_version())
2851 2851 raise
2852 2852
2853 2853 sys.exit(-1)
@@ -1,420 +1,420
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 import struct, os
11 11 from node import *
12 12 from i18n import gettext as _
13 13 from demandload import *
14 14 demandload(globals(), "time bisect stat util re errno")
15 15
class dirstate(object):
    """Track the state of files in the working directory.

    Each tracked file maps to a (state, mode, size, mtime) tuple where
    state is 'n' (normal), 'm' (needs merge), 'a' (added) or 'r'
    (removed).  Copy sources and the working directory's parent
    changesets are recorded as well.  The on-disk file is read lazily
    and written back on demand (or from __del__ when left dirty).
    """

    def __init__(self, opener, ui, root):
        self.opener = opener          # opener rooted at the .hg directory
        self.root = root              # absolute path of the working dir
        self.dirty = 0                # non-zero when memory and disk differ
        self.ui = ui
        self.map = None               # file -> (state, mode, size, mtime)
        self.pl = None                # parent changeset ids
        self.copies = {}              # copy destination -> copy source
        self.ignorefunc = None        # lazily-built .hgignore matcher
        self.blockignore = False      # temporarily bypass ignore rules

    def wjoin(self, f):
        """Join f to the working directory root."""
        return os.path.join(self.root, f)

    def getcwd(self):
        """Return the cwd relative to the repo root ('' at the root)."""
        cwd = os.getcwd()
        if cwd == self.root:
            return ''
        return cwd[len(self.root) + 1:]

    def hgignore(self):
        '''return the contents of .hgignore as a list of patterns.

        trailing white space is dropped.
        the escape character is backslash.
        comments start with #.
        empty lines are skipped.

        lines can be of the following formats:

        syntax: regexp # defaults following lines to non-rooted regexps
        syntax: glob # defaults following lines to non-rooted globs
        re:pattern # non-rooted regular expression
        glob:pattern # non-rooted glob
        pattern # pattern of the current default type'''
        syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}

        def parselines(fp):
            # strip comments (honouring backslash escapes) and whitespace
            for line in fp:
                escape = False
                for i in range(len(line)):
                    if escape:
                        escape = False
                    elif line[i] == '\\':
                        escape = True
                    elif line[i] == '#':
                        break
                line = line[:i].rstrip()
                if line:
                    yield line

        pats = []
        try:
            fp = open(self.wjoin('.hgignore'))
            syntax = 'relre:'
            for line in parselines(fp):
                if line.startswith('syntax:'):
                    s = line[7:].strip()
                    try:
                        syntax = syntaxes[s]
                    except KeyError:
                        self.ui.warn(_(".hgignore: ignoring invalid "
                                       "syntax '%s'\n") % s)
                    continue
                # an explicit prefix on the line overrides the default
                pat = syntax + line
                for s in syntaxes.values():
                    if line.startswith(s):
                        pat = line
                        break
                pats.append(pat)
        except IOError:
            # no .hgignore file: nothing is ignored
            pass
        return pats

    def ignore(self, fn):
        '''default match function used by dirstate and localrepository.
        this honours the .hgignore file, and nothing more.'''
        if self.blockignore:
            return False
        if not self.ignorefunc:
            ignore = self.hgignore()
            if ignore:
                # only the matcher is needed; the file list and anypats
                # flag from util.matcher are deliberately discarded
                files, self.ignorefunc, anypats = util.matcher(self.root,
                                                               inc=ignore,
                                                               src='.hgignore')
            else:
                self.ignorefunc = util.never
        return self.ignorefunc(fn)

    def __del__(self):
        # flush pending changes when the object is collected
        if self.dirty:
            self.write()

    def __getitem__(self, key):
        try:
            return self.map[key]
        except TypeError:
            # self.map is still None: read the dirstate and retry
            self.lazyread()
            return self[key]

    def __contains__(self, key):
        self.lazyread()
        return key in self.map

    def parents(self):
        """Return the working directory's parent changeset ids."""
        self.lazyread()
        return self.pl

    def markdirty(self):
        if not self.dirty:
            self.dirty = 1

    def setparents(self, p1, p2=None):
        """Set the working directory parents (p2 defaults to nullid).

        The default is bound late so that importing this class does not
        require the node module to be fully loaded.
        """
        if p2 is None:
            p2 = nullid
        self.lazyread()
        self.markdirty()
        self.pl = p1, p2

    def state(self, key):
        """Return the state character for key, or '?' when untracked."""
        try:
            return self[key][0]
        except KeyError:
            return "?"

    def lazyread(self):
        if self.map is None:
            self.read()

    def read(self):
        """Parse the on-disk dirstate into map/pl/copies."""
        self.map = {}
        self.pl = [nullid, nullid]
        try:
            st = self.opener("dirstate").read()
            if not st:
                return
        except:
            # deliberate best-effort: a missing or unreadable dirstate
            # is treated as empty
            return

        # two 20-byte parent ids, then fixed-size entries + filename
        self.pl = [st[:20], st[20: 40]]

        pos = 40
        while pos < len(st):
            e = struct.unpack(">cllll", st[pos:pos+17])
            l = e[4]
            pos += 17
            f = st[pos:pos + l]
            if '\0' in f:
                # "dest\0source" records a copy
                f, c = f.split('\0')
                self.copies[f] = c
            self.map[f] = e[:4]
            pos += l

    def copy(self, source, dest):
        """Record that dest was copied from source."""
        self.lazyread()
        self.markdirty()
        self.copies[dest] = source

    def copied(self, file):
        """Return the copy source of file, or None."""
        return self.copies.get(file, None)

    def update(self, files, state, **kw):
        ''' current states:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition'''
        if not files:
            return
        self.lazyread()
        self.markdirty()
        for f in files:
            if state == "r":
                self.map[f] = ('r', 0, 0, 0)
            else:
                s = os.lstat(self.wjoin(f))
                # callers may override the recorded size/mtime
                st_size = kw.get('st_size', s.st_size)
                st_mtime = kw.get('st_mtime', s.st_mtime)
                self.map[f] = (state, s.st_mode, st_size, st_mtime)
            if f in self.copies:
                del self.copies[f]

    def forget(self, files):
        """Stop tracking files (drop them from the map)."""
        if not files:
            return
        self.lazyread()
        self.markdirty()
        for f in files:
            try:
                del self.map[f]
            except KeyError:
                self.ui.warn(_("not in dirstate: %s!\n") % f)

    def clear(self):
        self.map = {}
        self.markdirty()

    def write(self):
        """Serialize the dirstate back to disk atomically."""
        st = self.opener("dirstate", "w", atomic=True)
        st.write("".join(self.pl))
        for f, e in self.map.items():
            c = self.copied(f)
            if c:
                # copies are stored as "dest\0source"
                f = f + "\0" + c
            e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
            st.write(e + f)
        self.dirty = 0

    def filterfiles(self, files):
        """Return the subset of self.map selected by the given paths.

        A path selects itself and, when it names a directory,
        everything beneath it.  '.' selects the whole map (returned as
        a copy).
        """
        ret = {}
        unknown = []

        for x in files:
            if x == '.':
                return self.map.copy()
            if x not in self.map:
                unknown.append(x)
            else:
                ret[x] = self.map[x]

        if not unknown:
            return ret

        b = sorted(self.map)
        blen = len(b)

        for x in unknown:
            bs = bisect.bisect(b, x)
            if bs != 0 and b[bs-1] == x:
                ret[x] = self.map[x]
                continue
            # collect map entries lying under the directory x
            while bs < blen:
                s = b[bs]
                if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
                    ret[s] = self.map[s]
                else:
                    break
                bs += 1
        return ret

    def supported_type(self, f, st, verbose=False):
        """True when the stat result names a trackable (regular) file;
        optionally warn about unsupported file types."""
        if stat.S_ISREG(st.st_mode):
            return True
        if verbose:
            kind = 'unknown'
            if stat.S_ISCHR(st.st_mode): kind = _('character device')
            elif stat.S_ISBLK(st.st_mode): kind = _('block device')
            elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
            elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
            elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
            elif stat.S_ISDIR(st.st_mode): kind = _('directory')
            self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
                util.pathto(self.getcwd(), f),
                kind))
        return False

    def statwalk(self, files=None, match=None, dc=None):
        """Walk the working directory, yielding (src, path, stat).

        src is 'f' for files found on disk and 'm' for files present
        only in the dirstate.  match defaults to util.always; the
        default is bound late to avoid touching util at class-creation
        time.
        """
        self.lazyread()
        if match is None:
            match = util.always

        # walk all files by default
        if not files:
            files = [self.root]
            if not dc:
                dc = self.map.copy()
        elif not dc:
            dc = self.filterfiles(files)

        def statmatch(file_, stat):
            # 'file_' avoids shadowing the builtin 'file' (pychecker)
            file_ = util.pconvert(file_)
            if file_ not in dc and self.ignore(file_):
                return False
            return match(file_)

        return self.walkhelper(files=files, statmatch=statmatch, dc=dc)

    def walk(self, files=None, match=None, dc=None):
        """Like statwalk, but filter out the stat result."""
        for src, f, st in self.statwalk(files, match, dc):
            yield src, f

    def walkhelper(self, files, statmatch, dc):
        """Walk recursively through the directory tree, finding all
        files matched by the statmatch function.

        Results are yielded as (src, filename, st) where src is:
          'f'  the file was found in the directory tree
          'm'  the file was only in the dirstate, not on disk
        and st is the stat result when the file exists on disk.

        dc is the current dirstate dict; it is not modified here but
        may be modified by the statmatch callback.
        """
        # recursion-free walker, faster than os.walk
        def findfiles(s):
            work = [s]
            while work:
                top = work.pop()
                names = os.listdir(top)
                names.sort()
                # nd is top relative to the repository root
                nd = util.normpath(top[len(self.root) + 1:])
                if nd == '.':
                    nd = ''
                for f in names:
                    np = util.pconvert(os.path.join(nd, f))
                    if seen(np):
                        continue
                    p = os.path.join(top, f)
                    # don't trip over symlinks
                    st = os.lstat(p)
                    if stat.S_ISDIR(st.st_mode):
                        ds = os.path.join(nd, f + '/')
                        if statmatch(ds, st):
                            work.append(p)
                        if statmatch(np, st) and np in dc:
                            yield 'm', np, st
                    elif statmatch(np, st):
                        if self.supported_type(np, st):
                            yield 'f', np, st
                        elif np in dc:
                            yield 'm', np, st

        known = {'.hg': 1}
        def seen(fn):
            if fn in known:
                return True
            known[fn] = 1

        # step one: find all files that match our criteria
        files.sort()
        for ff in util.unique(files):
            f = self.wjoin(ff)
            try:
                st = os.lstat(f)
            except OSError as inst:
                # only complain about a missing file when no dirstate
                # entry exists at or under that name
                nf = util.normpath(ff)
                found = False
                for fn in dc:
                    if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
                        found = True
                        break
                if not found:
                    self.ui.warn('%s: %s\n' % (
                        util.pathto(self.getcwd(), ff),
                        inst.strerror))
                continue
            if stat.S_ISDIR(st.st_mode):
                # 'sorted_' avoids shadowing the builtin (pychecker);
                # entries are ordered by filename (index 1)
                sorted_ = sorted(findfiles(f), key=lambda e: e[1])
                for e in sorted_:
                    yield e
            else:
                ff = util.normpath(ff)
                if seen(ff):
                    continue
                # explicitly named files bypass the ignore rules
                self.blockignore = True
                if statmatch(ff, st):
                    if self.supported_type(ff, st, verbose=True):
                        yield 'f', ff, st
                    elif ff in dc:
                        yield 'm', ff, st
                self.blockignore = False

        # step two: yield anything left in the dc hash that we have
        # not already seen
        for k in sorted(dc):
            if not seen(k) and statmatch(k, None):
                yield 'm', k, None

    def changes(self, files=None, match=None):
        """Compare the dirstate against the working directory.

        Returns (lookup, modified, added, removed, deleted, unknown).
        'lookup' lists files whose contents must be compared because
        only the mtime differs.
        """
        lookup, modified, added, unknown = [], [], [], []
        removed, deleted = [], []

        for src, fn, st in self.statwalk(files, match):
            try:
                # 'type_' avoids shadowing the builtin 'type' (pychecker)
                type_, mode, size, time = self[fn]
            except KeyError:
                unknown.append(fn)
                continue
            if src == 'm':
                nonexistent = True
                if not st:
                    try:
                        f = self.wjoin(fn)
                        st = os.lstat(f)
                    except OSError as inst:
                        if inst.errno != errno.ENOENT:
                            raise
                        st = None
                # we need to re-check that it is a valid file
                if st and self.supported_type(fn, st):
                    nonexistent = False
                # XXX: what to do with files no longer present in the
                # fs who are not removed in the dirstate?
                if nonexistent and type_ in "nm":
                    deleted.append(fn)
                    continue
            # check the common case first
            if type_ == 'n':
                if not st:
                    # stat relative to the repo root, not the cwd
                    # (fixed: was os.stat(fn), which failed when hg was
                    # run from a subdirectory)
                    st = os.stat(self.wjoin(fn))
                # 0x40 == 0100 octal: the owner-exec permission bit
                if size != st.st_size or (mode ^ st.st_mode) & 0x40:
                    modified.append(fn)
                elif time != st.st_mtime:
                    lookup.append(fn)
            elif type_ == 'm':
                modified.append(fn)
            elif type_ == 'a':
                added.append(fn)
            elif type_ == 'r':
                removed.append(fn)

        return (lookup, modified, added, removed, deleted, unknown)
@@ -1,1853 +1,1853
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository(object):
16 16 def __init__(self, ui, path=None, create=0):
17 17 if not path:
18 18 p = os.getcwd()
19 19 while not os.path.isdir(os.path.join(p, ".hg")):
20 20 oldp = p
21 21 p = os.path.dirname(p)
22 22 if p == oldp:
23 23 raise repo.RepoError(_("no repo found"))
24 24 path = p
25 25 self.path = os.path.join(path, ".hg")
26 26
27 27 if not create and not os.path.isdir(self.path):
28 28 raise repo.RepoError(_("repository %s not found") % path)
29 29
30 30 self.root = os.path.abspath(path)
31 31 self.ui = ui
32 32 self.opener = util.opener(self.path)
33 33 self.wopener = util.opener(self.root)
34 34 self.manifest = manifest.manifest(self.opener)
35 35 self.changelog = changelog.changelog(self.opener)
36 36 self.tagscache = None
37 37 self.nodetagscache = None
38 38 self.encodepats = None
39 39 self.decodepats = None
40 40
41 41 if create:
42 42 os.mkdir(self.path)
43 43 os.mkdir(self.join("data"))
44 44
45 45 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
46 46 try:
47 47 self.ui.readconfig(self.join("hgrc"))
48 48 except IOError:
49 49 pass
50 50
51 51 def hook(self, name, throw=False, **args):
52 52 def runhook(name, cmd):
53 53 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
54 54 old = {}
55 55 for k, v in args.items():
56 56 k = k.upper()
57 57 old['HG_' + k] = os.environ.get(k, None)
58 58 old[k] = os.environ.get(k, None)
59 59 os.environ['HG_' + k] = str(v)
60 60 os.environ[k] = str(v)
61 61
62 62 try:
63 63 # Hooks run in the repository root
64 64 olddir = os.getcwd()
65 65 os.chdir(self.root)
66 66 r = os.system(cmd)
67 67 finally:
68 68 for k, v in old.items():
69 69 if v is not None:
70 70 os.environ[k] = v
71 71 else:
72 72 del os.environ[k]
73 73
74 74 os.chdir(olddir)
75 75
76 76 if r:
77 77 desc, r = util.explain_exit(r)
78 78 if throw:
79 79 raise util.Abort(_('%s hook %s') % (name, desc))
80 80 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
81 81 return False
82 82 return True
83 83
84 84 r = True
85 85 for hname, cmd in self.ui.configitems("hooks"):
86 86 s = hname.split(".")
87 87 if s[0] == name and cmd:
88 88 r = runhook(hname, cmd) and r
89 89 return r
90 90
91 91 def tags(self):
92 92 '''return a mapping of tag to node'''
93 93 if not self.tagscache:
94 94 self.tagscache = {}
95 95 def addtag(self, k, n):
96 96 try:
97 97 bin_n = bin(n)
98 98 except TypeError:
99 99 bin_n = ''
100 100 self.tagscache[k.strip()] = bin_n
101 101
102 102 try:
103 103 # read each head of the tags file, ending with the tip
104 104 # and add each tag found to the map, with "newer" ones
105 105 # taking precedence
106 106 fl = self.file(".hgtags")
107 107 h = fl.heads()
108 108 h.reverse()
109 109 for r in h:
110 110 for l in fl.read(r).splitlines():
111 111 if l:
112 112 n, k = l.split(" ", 1)
113 113 addtag(self, k, n)
114 114 except KeyError:
115 115 pass
116 116
117 117 try:
118 118 f = self.opener("localtags")
119 119 for l in f:
120 120 n, k = l.split(" ", 1)
121 121 addtag(self, k, n)
122 122 except IOError:
123 123 pass
124 124
125 125 self.tagscache['tip'] = self.changelog.tip()
126 126
127 127 return self.tagscache
128 128
129 129 def tagslist(self):
130 130 '''return a list of tags ordered by revision'''
131 131 l = []
132 132 for t, n in self.tags().items():
133 133 try:
134 134 r = self.changelog.rev(n)
135 135 except:
136 136 r = -2 # sort to the beginning of the list if unknown
137 137 l.append((r, t, n))
138 138 l.sort()
139 139 return [(t, n) for r, t, n in l]
140 140
141 141 def nodetags(self, node):
142 142 '''return the tags associated with a node'''
143 143 if not self.nodetagscache:
144 144 self.nodetagscache = {}
145 145 for t, n in self.tags().items():
146 146 self.nodetagscache.setdefault(n, []).append(t)
147 147 return self.nodetagscache.get(node, [])
148 148
149 149 def lookup(self, key):
150 150 try:
151 151 return self.tags()[key]
152 152 except KeyError:
153 153 try:
154 154 return self.changelog.lookup(key)
155 155 except:
156 156 raise repo.RepoError(_("unknown revision '%s'") % key)
157 157
158 158 def dev(self):
159 159 return os.stat(self.path).st_dev
160 160
161 161 def local(self):
162 162 return True
163 163
164 164 def join(self, f):
165 165 return os.path.join(self.path, f)
166 166
167 167 def wjoin(self, f):
168 168 return os.path.join(self.root, f)
169 169
170 170 def file(self, f):
171 171 if f[0] == '/':
172 172 f = f[1:]
173 173 return filelog.filelog(self.opener, f)
174 174
175 175 def getcwd(self):
176 176 return self.dirstate.getcwd()
177 177
178 178 def wfile(self, f, mode='r'):
179 179 return self.wopener(f, mode)
180 180
181 181 def wread(self, filename):
182 182 if self.encodepats == None:
183 183 l = []
184 184 for pat, cmd in self.ui.configitems("encode"):
185 185 mf = util.matcher("", "/", [pat], [], [])[1]
186 186 l.append((mf, cmd))
187 187 self.encodepats = l
188 188
189 189 data = self.wopener(filename, 'r').read()
190 190
191 191 for mf, cmd in self.encodepats:
192 192 if mf(filename):
193 193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 194 data = util.filter(data, cmd)
195 195 break
196 196
197 197 return data
198 198
199 199 def wwrite(self, filename, data, fd=None):
200 200 if self.decodepats == None:
201 201 l = []
202 202 for pat, cmd in self.ui.configitems("decode"):
203 203 mf = util.matcher("", "/", [pat], [], [])[1]
204 204 l.append((mf, cmd))
205 205 self.decodepats = l
206 206
207 207 for mf, cmd in self.decodepats:
208 208 if mf(filename):
209 209 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
210 210 data = util.filter(data, cmd)
211 211 break
212 212
213 213 if fd:
214 214 return fd.write(data)
215 215 return self.wopener(filename, 'w').write(data)
216 216
217 217 def transaction(self):
218 218 # save dirstate for undo
219 219 try:
220 220 ds = self.opener("dirstate").read()
221 221 except IOError:
222 222 ds = ""
223 223 self.opener("journal.dirstate", "w").write(ds)
224 224
225 225 def after():
226 226 util.rename(self.join("journal"), self.join("undo"))
227 227 util.rename(self.join("journal.dirstate"),
228 228 self.join("undo.dirstate"))
229 229
230 230 return transaction.transaction(self.ui.warn, self.opener,
231 231 self.join("journal"), after)
232 232
233 233 def recover(self):
234 lock = self.lock()
234 l = self.lock()
235 235 if os.path.exists(self.join("journal")):
236 236 self.ui.status(_("rolling back interrupted transaction\n"))
237 237 transaction.rollback(self.opener, self.join("journal"))
238 238 self.manifest = manifest.manifest(self.opener)
239 239 self.changelog = changelog.changelog(self.opener)
240 240 return True
241 241 else:
242 242 self.ui.warn(_("no interrupted transaction available\n"))
243 243 return False
244 244
245 245 def undo(self, wlock=None):
246 246 if not wlock:
247 247 wlock = self.wlock()
248 lock = self.lock()
248 l = self.lock()
249 249 if os.path.exists(self.join("undo")):
250 250 self.ui.status(_("rolling back last transaction\n"))
251 251 transaction.rollback(self.opener, self.join("undo"))
252 252 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
253 253 self.dirstate.read()
254 254 else:
255 255 self.ui.warn(_("no undo information available\n"))
256 256
257 257 def lock(self, wait=1):
258 258 try:
259 259 return lock.lock(self.join("lock"), 0)
260 260 except lock.LockHeld, inst:
261 261 if wait:
262 262 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
263 263 return lock.lock(self.join("lock"), wait)
264 264 raise inst
265 265
266 266 def wlock(self, wait=1):
267 267 try:
268 268 wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
269 269 except lock.LockHeld, inst:
270 270 if not wait:
271 271 raise inst
272 272 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
273 273 wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
274 274 self.dirstate.read()
275 275 return wlock
276 276
277 277 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
278 278 "determine whether a new filenode is needed"
279 279 fp1 = manifest1.get(filename, nullid)
280 280 fp2 = manifest2.get(filename, nullid)
281 281
282 282 if fp2 != nullid:
283 283 # is one parent an ancestor of the other?
284 284 fpa = filelog.ancestor(fp1, fp2)
285 285 if fpa == fp1:
286 286 fp1, fp2 = fp2, nullid
287 287 elif fpa == fp2:
288 288 fp2 = nullid
289 289
290 290 # is the file unmodified from the parent? report existing entry
291 291 if fp2 == nullid and text == filelog.read(fp1):
292 292 return (fp1, None, None)
293 293
294 294 return (None, fp1, fp2)
295 295
296 296 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
297 297 orig_parent = self.dirstate.parents()[0] or nullid
298 298 p1 = p1 or self.dirstate.parents()[0] or nullid
299 299 p2 = p2 or self.dirstate.parents()[1] or nullid
300 300 c1 = self.changelog.read(p1)
301 301 c2 = self.changelog.read(p2)
302 302 m1 = self.manifest.read(c1[0])
303 303 mf1 = self.manifest.readflags(c1[0])
304 304 m2 = self.manifest.read(c2[0])
305 305 changed = []
306 306
307 307 if orig_parent == p1:
308 308 update_dirstate = 1
309 309 else:
310 310 update_dirstate = 0
311 311
312 312 if not wlock:
313 313 wlock = self.wlock()
314 lock = self.lock()
314 l = self.lock()
315 315 tr = self.transaction()
316 316 mm = m1.copy()
317 317 mfm = mf1.copy()
318 318 linkrev = self.changelog.count()
319 319 for f in files:
320 320 try:
321 321 t = self.wread(f)
322 322 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
323 323 r = self.file(f)
324 324 mfm[f] = tm
325 325
326 326 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
327 327 if entry:
328 328 mm[f] = entry
329 329 continue
330 330
331 331 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
332 332 changed.append(f)
333 333 if update_dirstate:
334 334 self.dirstate.update([f], "n")
335 335 except IOError:
336 336 try:
337 337 del mm[f]
338 338 del mfm[f]
339 339 if update_dirstate:
340 340 self.dirstate.forget([f])
341 341 except:
342 342 # deleted from p2?
343 343 pass
344 344
345 345 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
346 346 user = user or self.ui.username()
347 347 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
348 348 tr.close()
349 349 if update_dirstate:
350 350 self.dirstate.setparents(n, nullid)
351 351
352 352 def commit(self, files=None, text="", user=None, date=None,
353 353 match=util.always, force=False, wlock=None):
354 354 commit = []
355 355 remove = []
356 356 changed = []
357 357
358 358 if files:
359 359 for f in files:
360 360 s = self.dirstate.state(f)
361 361 if s in 'nmai':
362 362 commit.append(f)
363 363 elif s == 'r':
364 364 remove.append(f)
365 365 else:
366 366 self.ui.warn(_("%s not tracked!\n") % f)
367 367 else:
368 368 modified, added, removed, deleted, unknown = self.changes(match=match)
369 369 commit = modified + added
370 370 remove = removed
371 371
372 372 p1, p2 = self.dirstate.parents()
373 373 c1 = self.changelog.read(p1)
374 374 c2 = self.changelog.read(p2)
375 375 m1 = self.manifest.read(c1[0])
376 376 mf1 = self.manifest.readflags(c1[0])
377 377 m2 = self.manifest.read(c2[0])
378 378
379 379 if not commit and not remove and not force and p2 == nullid:
380 380 self.ui.status(_("nothing changed\n"))
381 381 return None
382 382
383 383 xp1 = hex(p1)
384 384 if p2 == nullid: xp2 = ''
385 385 else: xp2 = hex(p2)
386 386
387 387 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
388 388
389 389 if not wlock:
390 390 wlock = self.wlock()
391 lock = self.lock()
391 l = self.lock()
392 392 tr = self.transaction()
393 393
394 394 # check in files
395 395 new = {}
396 396 linkrev = self.changelog.count()
397 397 commit.sort()
398 398 for f in commit:
399 399 self.ui.note(f + "\n")
400 400 try:
401 401 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
402 402 t = self.wread(f)
403 403 except IOError:
404 404 self.ui.warn(_("trouble committing %s!\n") % f)
405 405 raise
406 406
407 407 r = self.file(f)
408 408
409 409 meta = {}
410 410 cp = self.dirstate.copied(f)
411 411 if cp:
412 412 meta["copy"] = cp
413 413 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
414 414 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
415 415 fp1, fp2 = nullid, nullid
416 416 else:
417 417 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
418 418 if entry:
419 419 new[f] = entry
420 420 continue
421 421
422 422 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
423 423 # remember what we've added so that we can later calculate
424 424 # the files to pull from a set of changesets
425 425 changed.append(f)
426 426
427 427 # update manifest
428 428 m1 = m1.copy()
429 429 m1.update(new)
430 430 for f in remove:
431 431 if f in m1:
432 432 del m1[f]
433 433 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
434 434 (new, remove))
435 435
436 436 # add changeset
437 437 new = new.keys()
438 438 new.sort()
439 439
440 440 if not text:
441 441 edittext = [""]
442 442 if p2 != nullid:
443 443 edittext.append("HG: branch merge")
444 444 edittext.extend(["HG: changed %s" % f for f in changed])
445 445 edittext.extend(["HG: removed %s" % f for f in remove])
446 446 if not changed and not remove:
447 447 edittext.append("HG: no files changed")
448 448 edittext.append("")
449 449 # run editor in the repository root
450 450 olddir = os.getcwd()
451 451 os.chdir(self.root)
452 452 edittext = self.ui.edit("\n".join(edittext))
453 453 os.chdir(olddir)
454 454 if not edittext.rstrip():
455 455 return None
456 456 text = edittext
457 457
458 458 user = user or self.ui.username()
459 459 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
460 460 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
461 461 parent2=xp2)
462 462 tr.close()
463 463
464 464 self.dirstate.setparents(n)
465 465 self.dirstate.update(new, "n")
466 466 self.dirstate.forget(remove)
467 467
468 468 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
469 469 return n
470 470
471 471 def walk(self, node=None, files=[], match=util.always):
472 472 if node:
473 473 fdict = dict.fromkeys(files)
474 474 for fn in self.manifest.read(self.changelog.read(node)[0]):
475 475 fdict.pop(fn, None)
476 476 if match(fn):
477 477 yield 'm', fn
478 478 for fn in fdict:
479 479 self.ui.warn(_('%s: No such file in rev %s\n') % (
480 480 util.pathto(self.getcwd(), fn), short(node)))
481 481 else:
482 482 for src, fn in self.dirstate.walk(files, match):
483 483 yield src, fn
484 484
485 485 def changes(self, node1=None, node2=None, files=[], match=util.always,
486 486 wlock=None):
487 487 """return changes between two nodes or node and working directory
488 488
489 489 If node1 is None, use the first dirstate parent instead.
490 490 If node2 is None, compare node1 with working directory.
491 491 """
492 492
493 493 def fcmp(fn, mf):
494 494 t1 = self.wread(fn)
495 495 t2 = self.file(fn).read(mf.get(fn, nullid))
496 496 return cmp(t1, t2)
497 497
498 498 def mfmatches(node):
499 499 change = self.changelog.read(node)
500 500 mf = dict(self.manifest.read(change[0]))
501 501 for fn in mf.keys():
502 502 if not match(fn):
503 503 del mf[fn]
504 504 return mf
505 505
506 506 # are we comparing the working directory?
507 507 if not node2:
508 508 if not wlock:
509 509 try:
510 510 wlock = self.wlock(wait=0)
511 511 except lock.LockHeld:
512 512 wlock = None
513 513 lookup, modified, added, removed, deleted, unknown = (
514 514 self.dirstate.changes(files, match))
515 515
516 516 # are we comparing working dir against its parent?
517 517 if not node1:
518 518 if lookup:
519 519 # do a full compare of any files that might have changed
520 520 mf2 = mfmatches(self.dirstate.parents()[0])
521 521 for f in lookup:
522 522 if fcmp(f, mf2):
523 523 modified.append(f)
524 524 elif wlock is not None:
525 525 self.dirstate.update([f], "n")
526 526 else:
527 527 # we are comparing working dir against non-parent
528 528 # generate a pseudo-manifest for the working dir
529 529 mf2 = mfmatches(self.dirstate.parents()[0])
530 530 for f in lookup + modified + added:
531 531 mf2[f] = ""
532 532 for f in removed:
533 533 if f in mf2:
534 534 del mf2[f]
535 535 else:
536 536 # we are comparing two revisions
537 537 deleted, unknown = [], []
538 538 mf2 = mfmatches(node2)
539 539
540 540 if node1:
541 541 # flush lists from dirstate before comparing manifests
542 542 modified, added = [], []
543 543
544 544 mf1 = mfmatches(node1)
545 545
546 546 for fn in mf2:
547 547 if mf1.has_key(fn):
548 548 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
549 549 modified.append(fn)
550 550 del mf1[fn]
551 551 else:
552 552 added.append(fn)
553 553
554 554 removed = mf1.keys()
555 555
556 556 # sort and return results:
557 557 for l in modified, added, removed, deleted, unknown:
558 558 l.sort()
559 559 return (modified, added, removed, deleted, unknown)
560 560
561 561 def add(self, list, wlock=None):
562 562 if not wlock:
563 563 wlock = self.wlock()
564 564 for f in list:
565 565 p = self.wjoin(f)
566 566 if not os.path.exists(p):
567 567 self.ui.warn(_("%s does not exist!\n") % f)
568 568 elif not os.path.isfile(p):
569 569 self.ui.warn(_("%s not added: only files supported currently\n")
570 570 % f)
571 571 elif self.dirstate.state(f) in 'an':
572 572 self.ui.warn(_("%s already tracked!\n") % f)
573 573 else:
574 574 self.dirstate.update([f], "a")
575 575
576 576 def forget(self, list, wlock=None):
577 577 if not wlock:
578 578 wlock = self.wlock()
579 579 for f in list:
580 580 if self.dirstate.state(f) not in 'ai':
581 581 self.ui.warn(_("%s not added!\n") % f)
582 582 else:
583 583 self.dirstate.forget([f])
584 584
585 585 def remove(self, list, unlink=False, wlock=None):
586 586 if unlink:
587 587 for f in list:
588 588 try:
589 589 util.unlink(self.wjoin(f))
590 590 except OSError, inst:
591 591 if inst.errno != errno.ENOENT:
592 592 raise
593 593 if not wlock:
594 594 wlock = self.wlock()
595 595 for f in list:
596 596 p = self.wjoin(f)
597 597 if os.path.exists(p):
598 598 self.ui.warn(_("%s still exists!\n") % f)
599 599 elif self.dirstate.state(f) == 'a':
600 600 self.dirstate.forget([f])
601 601 elif f not in self.dirstate:
602 602 self.ui.warn(_("%s not tracked!\n") % f)
603 603 else:
604 604 self.dirstate.update([f], "r")
605 605
606 606 def undelete(self, list, wlock=None):
607 607 p = self.dirstate.parents()[0]
608 608 mn = self.changelog.read(p)[0]
609 609 mf = self.manifest.readflags(mn)
610 610 m = self.manifest.read(mn)
611 611 if not wlock:
612 612 wlock = self.wlock()
613 613 for f in list:
614 614 if self.dirstate.state(f) not in "r":
615 615 self.ui.warn("%s not removed!\n" % f)
616 616 else:
617 617 t = self.file(f).read(m[f])
618 618 self.wwrite(f, t)
619 619 util.set_exec(self.wjoin(f), mf[f])
620 620 self.dirstate.update([f], "n")
621 621
622 622 def copy(self, source, dest, wlock=None):
623 623 p = self.wjoin(dest)
624 624 if not os.path.exists(p):
625 625 self.ui.warn(_("%s does not exist!\n") % dest)
626 626 elif not os.path.isfile(p):
627 627 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
628 628 else:
629 629 if not wlock:
630 630 wlock = self.wlock()
631 631 if self.dirstate.state(dest) == '?':
632 632 self.dirstate.update([dest], "a")
633 633 self.dirstate.copy(source, dest)
634 634
635 635 def heads(self, start=None):
636 636 heads = self.changelog.heads(start)
637 637 # sort the output in rev descending order
638 638 heads = [(-self.changelog.rev(h), h) for h in heads]
639 639 heads.sort()
640 640 return [n for (r, n) in heads]
641 641
642 642 # branchlookup returns a dict giving a list of branches for
643 643 # each head. A branch is defined as the tag of a node or
644 644 # the branch of the node's parents. If a node has multiple
645 645 # branch tags, tags are eliminated if they are visible from other
646 646 # branch tags.
647 647 #
648 648 # So, for this graph: a->b->c->d->e
649 649 # \ /
650 650 # aa -----/
651 651 # a has tag 2.6.12
652 652 # d has tag 2.6.13
653 653 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
654 654 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
655 655 # from the list.
656 656 #
657 657 # It is possible that more than one head will have the same branch tag.
658 658 # callers need to check the result for multiple heads under the same
659 659 # branch tag if that is a problem for them (ie checkout of a specific
660 660 # branch).
661 661 #
662 662 # passing in a specific branch will limit the depth of the search
663 663 # through the parents. It won't limit the branches returned in the
664 664 # result though.
665 665 def branchlookup(self, heads=None, branch=None):
666 666 if not heads:
667 667 heads = self.heads()
668 668 headt = [ h for h in heads ]
669 669 chlog = self.changelog
670 670 branches = {}
671 671 merges = []
672 672 seenmerge = {}
673 673
674 674 # traverse the tree once for each head, recording in the branches
675 675 # dict which tags are visible from this head. The branches
676 676 # dict also records which tags are visible from each tag
677 677 # while we traverse.
678 678 while headt or merges:
679 679 if merges:
680 680 n, found = merges.pop()
681 681 visit = [n]
682 682 else:
683 683 h = headt.pop()
684 684 visit = [h]
685 685 found = [h]
686 686 seen = {}
687 687 while visit:
688 688 n = visit.pop()
689 689 if n in seen:
690 690 continue
691 691 pp = chlog.parents(n)
692 692 tags = self.nodetags(n)
693 693 if tags:
694 694 for x in tags:
695 695 if x == 'tip':
696 696 continue
697 697 for f in found:
698 698 branches.setdefault(f, {})[n] = 1
699 699 branches.setdefault(n, {})[n] = 1
700 700 break
701 701 if n not in found:
702 702 found.append(n)
703 703 if branch in tags:
704 704 continue
705 705 seen[n] = 1
706 706 if pp[1] != nullid and n not in seenmerge:
707 707 merges.append((pp[1], [x for x in found]))
708 708 seenmerge[n] = 1
709 709 if pp[0] != nullid:
710 710 visit.append(pp[0])
711 711 # traverse the branches dict, eliminating branch tags from each
712 712 # head that are visible from another branch tag for that head.
713 713 out = {}
714 714 viscache = {}
715 715 for h in heads:
716 716 def visible(node):
717 717 if node in viscache:
718 718 return viscache[node]
719 719 ret = {}
720 720 visit = [node]
721 721 while visit:
722 722 x = visit.pop()
723 723 if x in viscache:
724 724 ret.update(viscache[x])
725 725 elif x not in ret:
726 726 ret[x] = 1
727 727 if x in branches:
728 728 visit[len(visit):] = branches[x].keys()
729 729 viscache[node] = ret
730 730 return ret
731 731 if h not in branches:
732 732 continue
733 733 # O(n^2), but somewhat limited. This only searches the
734 734 # tags visible from a specific head, not all the tags in the
735 735 # whole repo.
736 736 for b in branches[h]:
737 737 vis = False
738 738 for bb in branches[h].keys():
739 739 if b != bb:
740 740 if b in visible(bb):
741 741 vis = True
742 742 break
743 743 if not vis:
744 744 l = out.setdefault(h, [])
745 745 l[len(l):] = self.nodetags(b)
746 746 return out
747 747
748 748 def branches(self, nodes):
749 749 if not nodes:
750 750 nodes = [self.changelog.tip()]
751 751 b = []
752 752 for n in nodes:
753 753 t = n
754 754 while n:
755 755 p = self.changelog.parents(n)
756 756 if p[1] != nullid or p[0] == nullid:
757 757 b.append((t, n, p[0], p[1]))
758 758 break
759 759 n = p[0]
760 760 return b
761 761
762 762 def between(self, pairs):
763 763 r = []
764 764
765 765 for top, bottom in pairs:
766 766 n, l, i = top, [], 0
767 767 f = 1
768 768
769 769 while n != bottom:
770 770 p = self.changelog.parents(n)[0]
771 771 if i == f:
772 772 l.append(n)
773 773 f = f * 2
774 774 n = p
775 775 i += 1
776 776
777 777 r.append(l)
778 778
779 779 return r
780 780
781 781 def findincoming(self, remote, base=None, heads=None):
782 782 m = self.changelog.nodemap
783 783 search = []
784 784 fetch = {}
785 785 seen = {}
786 786 seenbranch = {}
787 787 if base == None:
788 788 base = {}
789 789
790 790 # assume we're closer to the tip than the root
791 791 # and start by examining the heads
792 792 self.ui.status(_("searching for changes\n"))
793 793
794 794 if not heads:
795 795 heads = remote.heads()
796 796
797 797 unknown = []
798 798 for h in heads:
799 799 if h not in m:
800 800 unknown.append(h)
801 801 else:
802 802 base[h] = 1
803 803
804 804 if not unknown:
805 805 return None
806 806
807 807 rep = {}
808 808 reqcnt = 0
809 809
810 810 # search through remote branches
811 811 # a 'branch' here is a linear segment of history, with four parts:
812 812 # head, root, first parent, second parent
813 813 # (a branch always has two parents (or none) by definition)
814 814 unknown = remote.branches(unknown)
815 815 while unknown:
816 816 r = []
817 817 while unknown:
818 818 n = unknown.pop(0)
819 819 if n[0] in seen:
820 820 continue
821 821
822 822 self.ui.debug(_("examining %s:%s\n")
823 823 % (short(n[0]), short(n[1])))
824 824 if n[0] == nullid:
825 825 break
826 826 if n in seenbranch:
827 827 self.ui.debug(_("branch already found\n"))
828 828 continue
829 829 if n[1] and n[1] in m: # do we know the base?
830 830 self.ui.debug(_("found incomplete branch %s:%s\n")
831 831 % (short(n[0]), short(n[1])))
832 832 search.append(n) # schedule branch range for scanning
833 833 seenbranch[n] = 1
834 834 else:
835 835 if n[1] not in seen and n[1] not in fetch:
836 836 if n[2] in m and n[3] in m:
837 837 self.ui.debug(_("found new changeset %s\n") %
838 838 short(n[1]))
839 839 fetch[n[1]] = 1 # earliest unknown
840 840 base[n[2]] = 1 # latest known
841 841 continue
842 842
843 843 for a in n[2:4]:
844 844 if a not in rep:
845 845 r.append(a)
846 846 rep[a] = 1
847 847
848 848 seen[n[0]] = 1
849 849
850 850 if r:
851 851 reqcnt += 1
852 852 self.ui.debug(_("request %d: %s\n") %
853 853 (reqcnt, " ".join(map(short, r))))
854 854 for p in range(0, len(r), 10):
855 855 for b in remote.branches(r[p:p+10]):
856 856 self.ui.debug(_("received %s:%s\n") %
857 857 (short(b[0]), short(b[1])))
858 858 if b[0] in m:
859 859 self.ui.debug(_("found base node %s\n")
860 860 % short(b[0]))
861 861 base[b[0]] = 1
862 862 elif b[0] not in seen:
863 863 unknown.append(b)
864 864
865 865 # do binary search on the branches we found
866 866 while search:
867 867 n = search.pop(0)
868 868 reqcnt += 1
869 869 l = remote.between([(n[0], n[1])])[0]
870 870 l.append(n[1])
871 871 p = n[0]
872 872 f = 1
873 873 for i in l:
874 874 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
875 875 if i in m:
876 876 if f <= 2:
877 877 self.ui.debug(_("found new branch changeset %s\n") %
878 878 short(p))
879 879 fetch[p] = 1
880 880 base[i] = 1
881 881 else:
882 882 self.ui.debug(_("narrowed branch search to %s:%s\n")
883 883 % (short(p), short(i)))
884 884 search.append((p, i))
885 885 break
886 886 p, f = i, f * 2
887 887
888 888 # sanity check our fetch list
889 889 for f in fetch.keys():
890 890 if f in m:
891 891 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
892 892
893 893 if base.keys() == [nullid]:
894 894 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
895 895
896 896 self.ui.note(_("found new changesets starting at ") +
897 897 " ".join([short(f) for f in fetch]) + "\n")
898 898
899 899 self.ui.debug(_("%d total queries\n") % reqcnt)
900 900
901 901 return fetch.keys()
902 902
903 903 def findoutgoing(self, remote, base=None, heads=None):
904 904 if base == None:
905 905 base = {}
906 906 self.findincoming(remote, base, heads)
907 907
908 908 self.ui.debug(_("common changesets up to ")
909 909 + " ".join(map(short, base.keys())) + "\n")
910 910
911 911 remain = dict.fromkeys(self.changelog.nodemap)
912 912
913 913 # prune everything remote has from the tree
914 914 del remain[nullid]
915 915 remove = base.keys()
916 916 while remove:
917 917 n = remove.pop(0)
918 918 if n in remain:
919 919 del remain[n]
920 920 for p in self.changelog.parents(n):
921 921 remove.append(p)
922 922
923 923 # find every node whose parents have been pruned
924 924 subset = []
925 925 for n in remain:
926 926 p1, p2 = self.changelog.parents(n)
927 927 if p1 not in remain and p2 not in remain:
928 928 subset.append(n)
929 929
930 930 # this is the set of all roots we have to push
931 931 return subset
932 932
933 933 def pull(self, remote, heads=None):
934 lock = self.lock()
934 l = self.lock()
935 935
936 936 # if we have an empty repo, fetch everything
937 937 if self.changelog.tip() == nullid:
938 938 self.ui.status(_("requesting all changes\n"))
939 939 fetch = [nullid]
940 940 else:
941 941 fetch = self.findincoming(remote)
942 942
943 943 if not fetch:
944 944 self.ui.status(_("no changes found\n"))
945 945 return 1
946 946
947 947 if heads is None:
948 948 cg = remote.changegroup(fetch, 'pull')
949 949 else:
950 950 cg = remote.changegroupsubset(fetch, heads, 'pull')
951 951 return self.addchangegroup(cg)
952 952
953 953 def push(self, remote, force=False):
954 lock = remote.lock()
954 l = remote.lock()
955 955
956 956 base = {}
957 957 heads = remote.heads()
958 958 inc = self.findincoming(remote, base, heads)
959 959 if not force and inc:
960 960 self.ui.warn(_("abort: unsynced remote changes!\n"))
961 961 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
962 962 return 1
963 963
964 964 update = self.findoutgoing(remote, base)
965 965 if not update:
966 966 self.ui.status(_("no changes found\n"))
967 967 return 1
968 968 elif not force:
969 969 if len(heads) < len(self.changelog.heads()):
970 970 self.ui.warn(_("abort: push creates new remote branches!\n"))
971 971 self.ui.status(_("(did you forget to merge?"
972 972 " use push -f to force)\n"))
973 973 return 1
974 974
975 975 cg = self.changegroup(update, 'push')
976 976 return remote.addchangegroup(cg)
977 977
978 978 def changegroupsubset(self, bases, heads, source):
979 979 """This function generates a changegroup consisting of all the nodes
980 980 that are descendents of any of the bases, and ancestors of any of
981 981 the heads.
982 982
983 983 It is fairly complex as determining which filenodes and which
984 984 manifest nodes need to be included for the changeset to be complete
985 985 is non-trivial.
986 986
987 987 Another wrinkle is doing the reverse, figuring out which changeset in
988 988 the changegroup a particular filenode or manifestnode belongs to."""
989 989
990 990 self.hook('preoutgoing', throw=True, source=source)
991 991
992 992 # Set up some initial variables
993 993 # Make it easy to refer to self.changelog
994 994 cl = self.changelog
995 995 # msng is short for missing - compute the list of changesets in this
996 996 # changegroup.
997 997 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
998 998 # Some bases may turn out to be superfluous, and some heads may be
999 999 # too. nodesbetween will return the minimal set of bases and heads
1000 1000 # necessary to re-create the changegroup.
1001 1001
1002 1002 # Known heads are the list of heads that it is assumed the recipient
1003 1003 # of this changegroup will know about.
1004 1004 knownheads = {}
1005 1005 # We assume that all parents of bases are known heads.
1006 1006 for n in bases:
1007 1007 for p in cl.parents(n):
1008 1008 if p != nullid:
1009 1009 knownheads[p] = 1
1010 1010 knownheads = knownheads.keys()
1011 1011 if knownheads:
1012 1012 # Now that we know what heads are known, we can compute which
1013 1013 # changesets are known. The recipient must know about all
1014 1014 # changesets required to reach the known heads from the null
1015 1015 # changeset.
1016 1016 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1017 1017 junk = None
1018 1018 # Transform the list into an ersatz set.
1019 1019 has_cl_set = dict.fromkeys(has_cl_set)
1020 1020 else:
1021 1021 # If there were no known heads, the recipient cannot be assumed to
1022 1022 # know about any changesets.
1023 1023 has_cl_set = {}
1024 1024
1025 1025 # Make it easy to refer to self.manifest
1026 1026 mnfst = self.manifest
1027 1027 # We don't know which manifests are missing yet
1028 1028 msng_mnfst_set = {}
1029 1029 # Nor do we know which filenodes are missing.
1030 1030 msng_filenode_set = {}
1031 1031
1032 1032 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1033 1033 junk = None
1034 1034
1035 1035 # A changeset always belongs to itself, so the changenode lookup
1036 1036 # function for a changenode is identity.
1037 1037 def identity(x):
1038 1038 return x
1039 1039
1040 1040 # A function generating function. Sets up an environment for the
1041 1041 # inner function.
1042 1042 def cmp_by_rev_func(revlog):
1043 1043 # Compare two nodes by their revision number in the environment's
1044 1044 # revision history. Since the revision number both represents the
1045 1045 # most efficient order to read the nodes in, and represents a
1046 1046 # topological sorting of the nodes, this function is often useful.
1047 1047 def cmp_by_rev(a, b):
1048 1048 return cmp(revlog.rev(a), revlog.rev(b))
1049 1049 return cmp_by_rev
1050 1050
1051 1051 # If we determine that a particular file or manifest node must be a
1052 1052 # node that the recipient of the changegroup will already have, we can
1053 1053 # also assume the recipient will have all the parents. This function
1054 1054 # prunes them from the set of missing nodes.
1055 1055 def prune_parents(revlog, hasset, msngset):
1056 1056 haslst = hasset.keys()
1057 1057 haslst.sort(cmp_by_rev_func(revlog))
1058 1058 for node in haslst:
1059 1059 parentlst = [p for p in revlog.parents(node) if p != nullid]
1060 1060 while parentlst:
1061 1061 n = parentlst.pop()
1062 1062 if n not in hasset:
1063 1063 hasset[n] = 1
1064 1064 p = [p for p in revlog.parents(n) if p != nullid]
1065 1065 parentlst.extend(p)
1066 1066 for n in hasset:
1067 1067 msngset.pop(n, None)
1068 1068
1069 1069 # This is a function generating function used to set up an environment
1070 1070 # for the inner function to execute in.
1071 1071 def manifest_and_file_collector(changedfileset):
1072 1072 # This is an information gathering function that gathers
1073 1073 # information from each changeset node that goes out as part of
1074 1074 # the changegroup. The information gathered is a list of which
1075 1075 # manifest nodes are potentially required (the recipient may
1076 1076 # already have them) and total list of all files which were
1077 1077 # changed in any changeset in the changegroup.
1078 1078 #
1079 1079 # We also remember the first changenode we saw any manifest
1080 1080 # referenced by so we can later determine which changenode 'owns'
1081 1081 # the manifest.
1082 1082 def collect_manifests_and_files(clnode):
1083 1083 c = cl.read(clnode)
1084 1084 for f in c[3]:
1085 1085 # This is to make sure we only have one instance of each
1086 1086 # filename string for each filename.
1087 1087 changedfileset.setdefault(f, f)
1088 1088 msng_mnfst_set.setdefault(c[0], clnode)
1089 1089 return collect_manifests_and_files
1090 1090
1091 1091 # Figure out which manifest nodes (of the ones we think might be part
1092 1092 # of the changegroup) the recipient must know about and remove them
1093 1093 # from the changegroup.
1094 1094 def prune_manifests():
1095 1095 has_mnfst_set = {}
1096 1096 for n in msng_mnfst_set:
1097 1097 # If a 'missing' manifest thinks it belongs to a changenode
1098 1098 # the recipient is assumed to have, obviously the recipient
1099 1099 # must have that manifest.
1100 1100 linknode = cl.node(mnfst.linkrev(n))
1101 1101 if linknode in has_cl_set:
1102 1102 has_mnfst_set[n] = 1
1103 1103 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1104 1104
1105 1105 # Use the information collected in collect_manifests_and_files to say
1106 1106 # which changenode any manifestnode belongs to.
1107 1107 def lookup_manifest_link(mnfstnode):
1108 1108 return msng_mnfst_set[mnfstnode]
1109 1109
1110 1110 # A function generating function that sets up the initial environment
1111 1111 # the inner function.
1112 1112 def filenode_collector(changedfiles):
1113 1113 next_rev = [0]
1114 1114 # This gathers information from each manifestnode included in the
1115 1115 # changegroup about which filenodes the manifest node references
1116 1116 # so we can include those in the changegroup too.
1117 1117 #
1118 1118 # It also remembers which changenode each filenode belongs to. It
1119 1119 # does this by assuming the a filenode belongs to the changenode
1120 1120 # the first manifest that references it belongs to.
1121 1121 def collect_msng_filenodes(mnfstnode):
1122 1122 r = mnfst.rev(mnfstnode)
1123 1123 if r == next_rev[0]:
1124 1124 # If the last rev we looked at was the one just previous,
1125 1125 # we only need to see a diff.
1126 1126 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1127 1127 # For each line in the delta
1128 1128 for dline in delta.splitlines():
1129 1129 # get the filename and filenode for that line
1130 1130 f, fnode = dline.split('\0')
1131 1131 fnode = bin(fnode[:40])
1132 1132 f = changedfiles.get(f, None)
1133 1133 # And if the file is in the list of files we care
1134 1134 # about.
1135 1135 if f is not None:
1136 1136 # Get the changenode this manifest belongs to
1137 1137 clnode = msng_mnfst_set[mnfstnode]
1138 1138 # Create the set of filenodes for the file if
1139 1139 # there isn't one already.
1140 1140 ndset = msng_filenode_set.setdefault(f, {})
1141 1141 # And set the filenode's changelog node to the
1142 1142 # manifest's if it hasn't been set already.
1143 1143 ndset.setdefault(fnode, clnode)
1144 1144 else:
1145 1145 # Otherwise we need a full manifest.
1146 1146 m = mnfst.read(mnfstnode)
1147 1147 # For every file in we care about.
1148 1148 for f in changedfiles:
1149 1149 fnode = m.get(f, None)
1150 1150 # If it's in the manifest
1151 1151 if fnode is not None:
1152 1152 # See comments above.
1153 1153 clnode = msng_mnfst_set[mnfstnode]
1154 1154 ndset = msng_filenode_set.setdefault(f, {})
1155 1155 ndset.setdefault(fnode, clnode)
1156 1156 # Remember the revision we hope to see next.
1157 1157 next_rev[0] = r + 1
1158 1158 return collect_msng_filenodes
1159 1159
1160 1160 # We have a list of filenodes we think we need for a file, lets remove
1161 1161 # all those we now the recipient must have.
1162 1162 def prune_filenodes(f, filerevlog):
1163 1163 msngset = msng_filenode_set[f]
1164 1164 hasset = {}
1165 1165 # If a 'missing' filenode thinks it belongs to a changenode we
1166 1166 # assume the recipient must have, then the recipient must have
1167 1167 # that filenode.
1168 1168 for n in msngset:
1169 1169 clnode = cl.node(filerevlog.linkrev(n))
1170 1170 if clnode in has_cl_set:
1171 1171 hasset[n] = 1
1172 1172 prune_parents(filerevlog, hasset, msngset)
1173 1173
1174 1174 # A function generator function that sets up the a context for the
1175 1175 # inner function.
1176 1176 def lookup_filenode_link_func(fname):
1177 1177 msngset = msng_filenode_set[fname]
1178 1178 # Lookup the changenode the filenode belongs to.
1179 1179 def lookup_filenode_link(fnode):
1180 1180 return msngset[fnode]
1181 1181 return lookup_filenode_link
1182 1182
1183 1183 # Now that we have all theses utility functions to help out and
1184 1184 # logically divide up the task, generate the group.
        def gengroup():
            """Generate the changegroup stream: changelog chunks, then
            manifest chunks, then per-file sections, each section prefixed
            with its length+name and the whole stream terminated by a
            zero-length chunk."""
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    # section header: length-prefixed file name
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)
1243 1243
1244 1244 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1245 1245
1246 1246 return util.chunkbuffer(gengroup())
1247 1247
1248 1248 def changegroup(self, basenodes, source):
1249 1249 """Generate a changegroup of all nodes that we have that a recipient
1250 1250 doesn't.
1251 1251
1252 1252 This is much easier than the previous function as we can assume that
1253 1253 the recipient has any changenode we aren't sending them."""
1254 1254
1255 1255 self.hook('preoutgoing', throw=True, source=source)
1256 1256
1257 1257 cl = self.changelog
1258 1258 nodes = cl.nodesbetween(basenodes, None)[0]
1259 1259 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1260 1260
1261 1261 def identity(x):
1262 1262 return x
1263 1263
1264 1264 def gennodelst(revlog):
1265 1265 for r in xrange(0, revlog.count()):
1266 1266 n = revlog.node(r)
1267 1267 if revlog.linkrev(n) in revset:
1268 1268 yield n
1269 1269
1270 1270 def changed_file_collector(changedfileset):
1271 1271 def collect_changed_files(clnode):
1272 1272 c = cl.read(clnode)
1273 1273 for fname in c[3]:
1274 1274 changedfileset[fname] = 1
1275 1275 return collect_changed_files
1276 1276
1277 1277 def lookuprevlink_func(revlog):
1278 1278 def lookuprevlink(n):
1279 1279 return cl.node(revlog.linkrev(n))
1280 1280 return lookuprevlink
1281 1281
1282 1282 def gengroup():
1283 1283 # construct a list of all changed files
1284 1284 changedfiles = {}
1285 1285
1286 1286 for chnk in cl.group(nodes, identity,
1287 1287 changed_file_collector(changedfiles)):
1288 1288 yield chnk
1289 1289 changedfiles = changedfiles.keys()
1290 1290 changedfiles.sort()
1291 1291
1292 1292 mnfst = self.manifest
1293 1293 nodeiter = gennodelst(mnfst)
1294 1294 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1295 1295 yield chnk
1296 1296
1297 1297 for fname in changedfiles:
1298 1298 filerevlog = self.file(fname)
1299 1299 nodeiter = gennodelst(filerevlog)
1300 1300 nodeiter = list(nodeiter)
1301 1301 if nodeiter:
1302 1302 yield struct.pack(">l", len(fname) + 4) + fname
1303 1303 lookup = lookuprevlink_func(filerevlog)
1304 1304 for chnk in filerevlog.group(nodeiter, lookup):
1305 1305 yield chnk
1306 1306
1307 1307 yield struct.pack(">l", 0)
1308 1308 self.hook('outgoing', node=hex(nodes[0]), source=source)
1309 1309
1310 1310 return util.chunkbuffer(gengroup())
1311 1311
1312 1312 def addchangegroup(self, source):
1313 1313
1314 1314 def getchunk():
1315 1315 d = source.read(4)
1316 1316 if not d:
1317 1317 return ""
1318 1318 l = struct.unpack(">l", d)[0]
1319 1319 if l <= 4:
1320 1320 return ""
1321 1321 d = source.read(l - 4)
1322 1322 if len(d) < l - 4:
1323 1323 raise repo.RepoError(_("premature EOF reading chunk"
1324 1324 " (got %d bytes, expected %d)")
1325 1325 % (len(d), l - 4))
1326 1326 return d
1327 1327
1328 1328 def getgroup():
1329 1329 while 1:
1330 1330 c = getchunk()
1331 1331 if not c:
1332 1332 break
1333 1333 yield c
1334 1334
1335 1335 def csmap(x):
1336 1336 self.ui.debug(_("add changeset %s\n") % short(x))
1337 1337 return self.changelog.count()
1338 1338
1339 1339 def revmap(x):
1340 1340 return self.changelog.rev(x)
1341 1341
1342 1342 if not source:
1343 1343 return
1344 1344
1345 1345 self.hook('prechangegroup', throw=True)
1346 1346
1347 1347 changesets = files = revisions = 0
1348 1348
1349 1349 tr = self.transaction()
1350 1350
1351 1351 oldheads = len(self.changelog.heads())
1352 1352
1353 1353 # pull off the changeset group
1354 1354 self.ui.status(_("adding changesets\n"))
1355 1355 co = self.changelog.tip()
1356 1356 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1357 1357 cnr, cor = map(self.changelog.rev, (cn, co))
1358 1358 if cn == nullid:
1359 1359 cnr = cor
1360 1360 changesets = cnr - cor
1361 1361
1362 1362 # pull off the manifest group
1363 1363 self.ui.status(_("adding manifests\n"))
1364 1364 mm = self.manifest.tip()
1365 1365 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1366 1366
1367 1367 # process the files
1368 1368 self.ui.status(_("adding file changes\n"))
1369 1369 while 1:
1370 1370 f = getchunk()
1371 1371 if not f:
1372 1372 break
1373 1373 self.ui.debug(_("adding %s revisions\n") % f)
1374 1374 fl = self.file(f)
1375 1375 o = fl.count()
1376 1376 n = fl.addgroup(getgroup(), revmap, tr)
1377 1377 revisions += fl.count() - o
1378 1378 files += 1
1379 1379
1380 1380 newheads = len(self.changelog.heads())
1381 1381 heads = ""
1382 1382 if oldheads and newheads > oldheads:
1383 1383 heads = _(" (+%d heads)") % (newheads - oldheads)
1384 1384
1385 1385 self.ui.status(_("added %d changesets"
1386 1386 " with %d changes to %d files%s\n")
1387 1387 % (changesets, revisions, files, heads))
1388 1388
1389 1389 self.hook('pretxnchangegroup', throw=True,
1390 1390 node=hex(self.changelog.node(cor+1)))
1391 1391
1392 1392 tr.close()
1393 1393
1394 1394 if changesets > 0:
1395 1395 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1396 1396
1397 1397 for i in range(cor + 1, cnr + 1):
1398 1398 self.hook("incoming", node=hex(self.changelog.node(i)))
1399 1399
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        """Update the working directory to changeset *node*.

        allow: permit a branch merge; force: discard local differences;
        choose: optional predicate selecting which files to touch;
        moddirstate: whether to record the result in the dirstate;
        forcemerge: merge even with outstanding changes; wlock: an
        already-held working-dir lock, acquired here if needed.
        Returns 1 on refusal/abort, otherwise the merge error flag.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # files to 3-way merge, files to fetch from m2, files to remove
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files remaining in m2 exist remotely but not locally
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other)
            if ret:
                err = True
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
        return err
1663 1663
1664 1664 def merge3(self, fn, my, other):
1665 1665 """perform a 3-way merge in the working directory"""
1666 1666
1667 1667 def temp(prefix, node):
1668 1668 pre = "%s~%s." % (os.path.basename(fn), prefix)
1669 1669 (fd, name) = tempfile.mkstemp("", pre)
1670 1670 f = os.fdopen(fd, "wb")
1671 1671 self.wwrite(fn, fl.read(node), f)
1672 1672 f.close()
1673 1673 return name
1674 1674
1675 1675 fl = self.file(fn)
1676 1676 base = fl.ancestor(my, other)
1677 1677 a = self.wjoin(fn)
1678 1678 b = temp("base", base)
1679 1679 c = temp("other", other)
1680 1680
1681 1681 self.ui.note(_("resolving %s\n") % fn)
1682 1682 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1683 1683 (fn, short(my), short(other), short(base)))
1684 1684
1685 1685 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1686 1686 or "hgmerge")
1687 1687 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1688 1688 if r:
1689 1689 self.ui.warn(_("merging %s failed!\n") % fn)
1690 1690
1691 1691 os.unlink(b)
1692 1692 os.unlink(c)
1693 1693 return r
1694 1694
1695 1695 def verify(self):
1696 1696 filelinkrevs = {}
1697 1697 filenodes = {}
1698 1698 changesets = revisions = files = 0
1699 1699 errors = [0]
1700 1700 neededmanifests = {}
1701 1701
1702 1702 def err(msg):
1703 1703 self.ui.warn(msg + "\n")
1704 1704 errors[0] += 1
1705 1705
1706 1706 def checksize(obj, name):
1707 1707 d = obj.checksize()
1708 1708 if d[0]:
1709 1709 err(_("%s data length off by %d bytes") % (name, d[0]))
1710 1710 if d[1]:
1711 1711 err(_("%s index contains %d extra bytes") % (name, d[1]))
1712 1712
1713 1713 seen = {}
1714 1714 self.ui.status(_("checking changesets\n"))
1715 1715 checksize(self.changelog, "changelog")
1716 1716
1717 1717 for i in range(self.changelog.count()):
1718 1718 changesets += 1
1719 1719 n = self.changelog.node(i)
1720 1720 l = self.changelog.linkrev(n)
1721 1721 if l != i:
1722 1722 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1723 1723 if n in seen:
1724 1724 err(_("duplicate changeset at revision %d") % i)
1725 1725 seen[n] = 1
1726 1726
1727 1727 for p in self.changelog.parents(n):
1728 1728 if p not in self.changelog.nodemap:
1729 1729 err(_("changeset %s has unknown parent %s") %
1730 1730 (short(n), short(p)))
1731 1731 try:
1732 1732 changes = self.changelog.read(n)
1733 1733 except KeyboardInterrupt:
1734 1734 self.ui.warn(_("interrupted"))
1735 1735 raise
1736 1736 except Exception, inst:
1737 1737 err(_("unpacking changeset %s: %s") % (short(n), inst))
1738 1738
1739 1739 neededmanifests[changes[0]] = n
1740 1740
1741 1741 for f in changes[3]:
1742 1742 filelinkrevs.setdefault(f, []).append(i)
1743 1743
1744 1744 seen = {}
1745 1745 self.ui.status(_("checking manifests\n"))
1746 1746 checksize(self.manifest, "manifest")
1747 1747
1748 1748 for i in range(self.manifest.count()):
1749 1749 n = self.manifest.node(i)
1750 1750 l = self.manifest.linkrev(n)
1751 1751
1752 1752 if l < 0 or l >= self.changelog.count():
1753 1753 err(_("bad manifest link (%d) at revision %d") % (l, i))
1754 1754
1755 1755 if n in neededmanifests:
1756 1756 del neededmanifests[n]
1757 1757
1758 1758 if n in seen:
1759 1759 err(_("duplicate manifest at revision %d") % i)
1760 1760
1761 1761 seen[n] = 1
1762 1762
1763 1763 for p in self.manifest.parents(n):
1764 1764 if p not in self.manifest.nodemap:
1765 1765 err(_("manifest %s has unknown parent %s") %
1766 1766 (short(n), short(p)))
1767 1767
1768 1768 try:
1769 1769 delta = mdiff.patchtext(self.manifest.delta(n))
1770 1770 except KeyboardInterrupt:
1771 1771 self.ui.warn(_("interrupted"))
1772 1772 raise
1773 1773 except Exception, inst:
1774 1774 err(_("unpacking manifest %s: %s") % (short(n), inst))
1775 1775
1776 1776 ff = [ l.split('\0') for l in delta.splitlines() ]
1777 1777 for f, fn in ff:
1778 1778 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1779 1779
1780 1780 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1781 1781
1782 1782 for m, c in neededmanifests.items():
1783 1783 err(_("Changeset %s refers to unknown manifest %s") %
1784 1784 (short(m), short(c)))
1785 1785 del neededmanifests
1786 1786
1787 1787 for f in filenodes:
1788 1788 if f not in filelinkrevs:
1789 1789 err(_("file %s in manifest but not in changesets") % f)
1790 1790
1791 1791 for f in filelinkrevs:
1792 1792 if f not in filenodes:
1793 1793 err(_("file %s in changeset but not in manifest") % f)
1794 1794
1795 1795 self.ui.status(_("checking files\n"))
1796 1796 ff = filenodes.keys()
1797 1797 ff.sort()
1798 1798 for f in ff:
1799 1799 if f == "/dev/null":
1800 1800 continue
1801 1801 files += 1
1802 1802 fl = self.file(f)
1803 1803 checksize(fl, f)
1804 1804
1805 1805 nodes = {nullid: 1}
1806 1806 seen = {}
1807 1807 for i in range(fl.count()):
1808 1808 revisions += 1
1809 1809 n = fl.node(i)
1810 1810
1811 1811 if n in seen:
1812 1812 err(_("%s: duplicate revision %d") % (f, i))
1813 1813 if n not in filenodes[f]:
1814 1814 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1815 1815 else:
1816 1816 del filenodes[f][n]
1817 1817
1818 1818 flr = fl.linkrev(n)
1819 1819 if flr not in filelinkrevs[f]:
1820 1820 err(_("%s:%s points to unexpected changeset %d")
1821 1821 % (f, short(n), flr))
1822 1822 else:
1823 1823 filelinkrevs[f].remove(flr)
1824 1824
1825 1825 # verify contents
1826 1826 try:
1827 1827 t = fl.read(n)
1828 1828 except KeyboardInterrupt:
1829 1829 self.ui.warn(_("interrupted"))
1830 1830 raise
1831 1831 except Exception, inst:
1832 1832 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1833 1833
1834 1834 # verify parents
1835 1835 (p1, p2) = fl.parents(n)
1836 1836 if p1 not in nodes:
1837 1837 err(_("file %s:%s unknown parent 1 %s") %
1838 1838 (f, short(n), short(p1)))
1839 1839 if p2 not in nodes:
1840 1840 err(_("file %s:%s unknown parent 2 %s") %
1841 1841 (f, short(n), short(p1)))
1842 1842 nodes[n] = 1
1843 1843
1844 1844 # cross-check
1845 1845 for node in filenodes[f]:
1846 1846 err(_("node %s in manifests not in %s") % (hex(node), f))
1847 1847
1848 1848 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1849 1849 (files, changesets, revisions))
1850 1850
1851 1851 if errors[0]:
1852 1852 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1853 1853 return 1
@@ -1,870 +1,864
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
17 17
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # sort the parents so the digest is symmetric in (p1, p2)
    parents = [p1, p2]
    parents.sort()
    s = sha.new(parents[0])
    s.update(parents[1])
    s.update(text)
    return s.digest()
31 31
def compress(text):
    """generate a possibly-compressed representation of text

    Returns a (marker, data) pair: ("", zlib-data) when compression pays
    off, ('u', text) for literal text, or ("", text) when the text is
    empty or starts with NUL (already unambiguous).
    """
    if not text:
        return ("", text)
    # 'compressed', not 'bin': avoid shadowing the builtin (pychecker)
    if len(text) >= 44:
        compressed = zlib.compress(text)
        if len(compressed) <= len(text):
            return ("", compressed)
    # compression skipped or did not pay off: store literally, tagging
    # with 'u' unless the text already starts with a NUL marker byte
    if text[0] == '\0':
        return ("", text)
    return ('u', text)
43 43
def decompress(bin):
    """decompress the given input

    Dispatches on the first byte: '\\0' means stored literally, 'x' is a
    zlib stream, 'u' prefixes uncompressed text.
    """
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        return bin
    elif marker == 'x':
        return zlib.decompress(bin)
    elif marker == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %s") % marker)
52 52
53 53 indexformat = ">4l20s20s20s"
54 54
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        # raw index file contents
        self.data = data
        # size of one index record
        self.s = struct.calcsize(indexformat)
        # number of records (py2 integer division)
        self.l = len(data)/self.s
        # parsed entries, filled in lazily by load()
        self.index = [None] * self.l
        # nodeid -> revision number, also filled in lazily
        self.map = {nullid: -1}
        # set once the whole index has been parsed
        self.all = 0
        self.revlog = revlog

    def trunc(self, pos):
        # truncate to the record containing byte offset pos
        self.l = pos/self.s

    def load(self, pos=None):
        """Parse the 1000-entry block containing *pos*, or everything
        when pos is None (then the revlog's lazy wrappers are replaced
        by the underlying list/dict)."""
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            # hand the fully-parsed structures straight to the revlog
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            # e[6] is the nodeid field of indexformat
            self.map[e[6]] = i
            i += 1
96 96
class lazyindex(object):
    """a lazy version of the index array"""

    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        """Force entry *pos* to be parsed and return it."""
        if pos < 0:
            pos += len(self.p.index)
        self.p.load(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        # an entry of None means "not parsed yet"
        entry = self.p.index[pos]
        if entry:
            return entry
        return self.load(pos)

    def __delitem__(self, pos):
        del self.p.index[pos]

    def append(self, e):
        self.p.index.append(e)

    def trunc(self, pos):
        self.p.trunc(pos)
116 116
class lazymap(object):
    """a lazy version of the node map"""

    def __init__(self, parser):
        self.p = parser

    def load(self, key):
        """Parse the index block that should contain *key*."""
        if self.p.all: return
        # locate the raw bytes of the node to find its record number
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        pos = n / self.p.s
        self.p.load(pos)

    def __contains__(self, key):
        self.p.load()
        return key in self.p.map

    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]

    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            pass
        # not parsed yet: load the relevant block and retry
        try:
            self.load(key)
            return self.p.map[key]
        except KeyError:
            raise KeyError("node " + hex(key))

    def __setitem__(self, key, val):
        self.p.map[key] = val

    def __delitem__(self, key):
        del self.p.map[key]
152 152
class RevlogError(Exception):
    """Raised on revlog format or consistency errors."""
154 154
155 155 class revlog(object):
156 156 """
157 157 the underlying revision storage object
158 158
159 159 A revlog consists of two parts, an index and the revision data.
160 160
161 161 The index is a file with a fixed record size containing
162 162 information on each revision, includings its nodeid (hash), the
163 163 nodeids of its parents, the position and offset of its data within
164 164 the data file, and the revision it's based on. Finally, each entry
165 165 contains a linkrev entry that can serve as a pointer to external
166 166 data.
167 167
168 168 The revision data itself is a linear collection of data chunks.
169 169 Each chunk represents a revision and is usually represented as a
170 170 delta against the previous chunk. To bound lookup time, runs of
171 171 deltas are limited to about 2 times the length of the original
172 172 version data. This makes retrieval of a version proportional to
173 173 its size, or O(1) relative to the number of revisions.
174 174
175 175 Both pieces of the revlog are written to in an append-only
176 176 fashion, which means we never need to rewrite a file to insert or
177 177 remove data, and can use some simple techniques to avoid the need
178 178 for locking while reading.
179 179 """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        # (node, rev, text) of the most recently reconstructed revision
        self.cache = None
        self.chunkcache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError, inst:
            # a missing index just means an empty revlog
            if inst.errno != errno.ENOENT:
                raise
            i = ""

        # the first entry's offset field doubles as a version signature
        if i and i[:4] != "\0\0\0\0":
            raise RevlogError(_("incompatible revlog signature on %s") %
                              self.indexfile)

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            # small index: parse it eagerly into plain list/dict
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, l * s, s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1
225 225
226 226 def tip(self): return self.node(len(self.index) - 1)
227 227 def count(self): return len(self.index)
228 228 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        """Return the revision number of *node*.

        Raises RevlogError if the node is not in this revlog.
        """
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
234 234 def linkrev(self, node): return self.index[self.rev(node)][3]
235 235 def parents(self, node):
236 236 if node == nullid: return (nullid, nullid)
237 237 return self.index[self.rev(node)][4:6]
238 238
239 239 def start(self, rev): return self.index[rev][0]
240 240 def length(self, rev): return self.index[rev][1]
241 241 def end(self, rev): return self.start(rev) + self.length(rev)
242 242 def base(self, rev): return self.index[rev][2]
243 243
244 244 def reachable(self, rev, stop=None):
245 245 reachable = {}
246 246 visit = [rev]
247 247 reachable[rev] = 1
248 248 if stop:
249 249 stopn = self.rev(stop)
250 250 else:
251 251 stopn = 0
252 252 while visit:
253 253 n = visit.pop(0)
254 254 if n == stop:
255 255 continue
256 256 if n == nullid:
257 257 continue
258 258 for p in self.parents(n):
259 259 if self.rev(p) < stopn:
260 260 continue
261 261 if p not in reachable:
262 262 reachable[p] = 1
263 263 visit.append(p)
264 264 return reachable
265 265
266 266 def nodesbetween(self, roots=None, heads=None):
267 267 """Return a tuple containing three elements. Elements 1 and 2 contain
268 268         a final list of bases and heads after all the unreachable ones have been
269 269         pruned.  Element 0 contains a topologically sorted list of all
270 270
271 271 nodes that satisfy these constraints:
272 272 1. All nodes must be descended from a node in roots (the nodes on
273 273 roots are considered descended from themselves).
274 274 2. All nodes must also be ancestors of a node in heads (the nodes in
275 275 heads are considered to be their own ancestors).
276 276
277 277 If roots is unspecified, nullid is assumed as the only root.
278 278 If heads is unspecified, it is taken to be the output of the
279 279 heads method (i.e. a list of all nodes in the repository that
280 280 have no children)."""
281 281 nonodes = ([], [], [])
282 282 if roots is not None:
283 283 roots = list(roots)
284 284 if not roots:
285 285 return nonodes
286 286 lowestrev = min([self.rev(n) for n in roots])
287 287 else:
288 288 roots = [nullid] # Everybody's a descendent of nullid
289 289 lowestrev = -1
290 290 if (lowestrev == -1) and (heads is None):
291 291 # We want _all_ the nodes!
292 292 return ([self.node(r) for r in xrange(0, self.count())],
293 293 [nullid], list(self.heads()))
294 294 if heads is None:
295 295 # All nodes are ancestors, so the latest ancestor is the last
296 296 # node.
297 297 highestrev = self.count() - 1
298 298 # Set ancestors to None to signal that every node is an ancestor.
299 299 ancestors = None
300 300 # Set heads to an empty dictionary for later discovery of heads
301 301 heads = {}
302 302 else:
303 303 heads = list(heads)
304 304 if not heads:
305 305 return nonodes
306 306 ancestors = {}
307 307 # Start at the top and keep marking parents until we're done.
308 308 nodestotag = heads[:]
309 309 # Turn heads into a dictionary so we can remove 'fake' heads.
310 310 # Also, later we will be using it to filter out the heads we can't
311 311 # find from roots.
312 312 heads = dict.fromkeys(heads, 0)
313 313 # Remember where the top was so we can use it as a limit later.
314 314 highestrev = max([self.rev(n) for n in nodestotag])
315 315 while nodestotag:
316 316 # grab a node to tag
317 317 n = nodestotag.pop()
318 318 # Never tag nullid
319 319 if n == nullid:
320 320 continue
321 321 # A node's revision number represents its place in a
322 322 # topologically sorted list of nodes.
323 323 r = self.rev(n)
324 324 if r >= lowestrev:
325 325 if n not in ancestors:
326 326 # If we are possibly a descendent of one of the roots
327 327 # and we haven't already been marked as an ancestor
328 328 ancestors[n] = 1 # Mark as ancestor
329 329 # Add non-nullid parents to list of nodes to tag.
330 330 nodestotag.extend([p for p in self.parents(n) if
331 331 p != nullid])
332 332 elif n in heads: # We've seen it before, is it a fake head?
333 333 # So it is, real heads should not be the ancestors of
334 334 # any other heads.
335 335 heads.pop(n)
336 336 if not ancestors:
337 337 return nonodes
338 338 # Now that we have our set of ancestors, we want to remove any
339 339 # roots that are not ancestors.
340 340
341 341 # If one of the roots was nullid, everything is included anyway.
342 342 if lowestrev > -1:
343 343 # But, since we weren't, let's recompute the lowest rev to not
344 344 # include roots that aren't ancestors.
345 345
346 346 # Filter out roots that aren't ancestors of heads
347 347 roots = [n for n in roots if n in ancestors]
348 348 # Recompute the lowest revision
349 349 if roots:
350 350 lowestrev = min([self.rev(n) for n in roots])
351 351 else:
352 352 # No more roots? Return empty list
353 353 return nonodes
354 354 else:
355 355 # We are descending from nullid, and don't need to care about
356 356 # any other roots.
357 357 lowestrev = -1
358 358 roots = [nullid]
359 359 # Transform our roots list into a 'set' (i.e. a dictionary where the
360 360 # values don't matter.
361 361 descendents = dict.fromkeys(roots, 1)
362 362 # Also, keep the original roots so we can filter out roots that aren't
363 363 # 'real' roots (i.e. are descended from other roots).
364 364 roots = descendents.copy()
365 365 # Our topologically sorted list of output nodes.
366 366 orderedout = []
367 367 # Don't start at nullid since we don't want nullid in our output list,
368 368 # and if nullid shows up in descedents, empty parents will look like
369 369 # they're descendents.
370 370 for r in xrange(max(lowestrev, 0), highestrev + 1):
371 371 n = self.node(r)
372 372 isdescendent = False
373 373 if lowestrev == -1: # Everybody is a descendent of nullid
374 374 isdescendent = True
375 375 elif n in descendents:
376 376 # n is already a descendent
377 377 isdescendent = True
378 378 # This check only needs to be done here because all the roots
379 379 # will start being marked is descendents before the loop.
380 380 if n in roots:
381 381 # If n was a root, check if it's a 'real' root.
382 382 p = tuple(self.parents(n))
383 383 # If any of its parents are descendents, it's not a root.
384 384 if (p[0] in descendents) or (p[1] in descendents):
385 385 roots.pop(n)
386 386 else:
387 387 p = tuple(self.parents(n))
388 388 # A node is a descendent if either of its parents are
389 389 # descendents. (We seeded the dependents list with the roots
390 390 # up there, remember?)
391 391 if (p[0] in descendents) or (p[1] in descendents):
392 392 descendents[n] = 1
393 393 isdescendent = True
394 394 if isdescendent and ((ancestors is None) or (n in ancestors)):
395 395 # Only include nodes that are both descendents and ancestors.
396 396 orderedout.append(n)
397 397 if (ancestors is not None) and (n in heads):
398 398 # We're trying to figure out which heads are reachable
399 399 # from roots.
400 400 # Mark this head as having been reached
401 401 heads[n] = 1
402 402 elif ancestors is None:
403 403 # Otherwise, we're trying to discover the heads.
404 404 # Assume this is a head because if it isn't, the next step
405 405 # will eventually remove it.
406 406 heads[n] = 1
407 407 # But, obviously its parents aren't.
408 408 for p in self.parents(n):
409 409 heads.pop(p, None)
410 410 heads = [n for n in heads.iterkeys() if heads[n] != 0]
411 411 roots = roots.keys()
412 412 assert orderedout
413 413 assert roots
414 414 assert heads
415 415 return (orderedout, roots, heads)
416 416
def heads(self, start=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned

    """
    if start is None:
        start = nullid
    # seen: every node reachable (descended) from start
    # tips: current candidate heads among the seen nodes
    seen = {start: 1}
    tips = {start: 1}
    startrev = self.rev(start)

    for r in xrange(startrev + 1, self.count()):
        n = self.node(r)
        for pn in self.parents(n):
            if pn in seen:
                seen[n] = 1
                tips[n] = 1
            # a node with a seen child can no longer be a head
            if pn in tips:
                del tips[pn]
    return tips.keys()
439 439
def children(self, node):
    """find the children of a given node

    Scans every revision newer than node and collects those that
    list node among their parents.  The original loop body ended in
    no-op ``continue`` statements (and a dead ``elif pn == nullid``
    branch) and could append the same child twice if both of its
    parents equalled node; we stop at the first matching parent so
    each child appears at most once.
    """
    c = []
    p = self.rev(node)
    for r in range(p + 1, self.count()):
        n = self.node(r)
        for pn in self.parents(n):
            if pn == node:
                c.append(n)
                break
    return c
453 453
def lookup(self, id):
    """locate a node based on revision number or subset of hex nodeid

    An integer string is treated as a revision number (negative
    counts from the tip); anything else is matched as a hex-nodeid
    prefix.  Raises RevlogError when a prefix is ambiguous or
    matches nothing.  The trailing ``return None`` of the original
    was unreachable (both paths return or raise) and is removed.
    """
    try:
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            rev = self.count() + rev
        if rev < 0 or rev >= self.count():
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        c = []
        for n in self.nodemap:
            if hex(n).startswith(id):
                c.append(n)
        if len(c) > 1:
            raise RevlogError(_("Ambiguous identifier"))
        if len(c) < 1:
            raise RevlogError(_("No match found"))
        return c[0]
472 472
def diff(self, a, b):
    """return a delta transforming revision text a into b"""
    return mdiff.textdiff(a, b)
476 476
def patches(self, t, pl):
    """apply the list of patches pl to the base text t"""
    return mdiff.patches(t, pl)
480 480
def chunk(self, rev):
    """return the raw (still compressed on disk, decompressed here)
    data for a single revision

    Reads go through a one-slot cache (self.chunkcache) holding at
    least 4MB starting at the first byte needed, so sequential
    chunk() calls mostly avoid reopening and seeking the data file.
    """
    start, length = self.start(rev), self.length(rev)
    end = start + length

    def loadcache():
        # read at least 4MB so neighbouring revisions hit the cache
        cache_length = max(4096 * 1024, length) # 4Mo
        df = self.opener(self.datafile)
        df.seek(start)
        self.chunkcache = (start, df.read(cache_length))

    if not self.chunkcache:
        loadcache()

    cache_start = self.chunkcache[0]
    cache_end = cache_start + len(self.chunkcache[1])
    if start >= cache_start and end <= cache_end:
        # it is cached
        offset = start - cache_start
    else:
        # cache miss: refill starting at our offset
        loadcache()
        offset = 0

    #def checkchunk():
    #    df = self.opener(self.datafile)
    #    df.seek(start)
    #    return df.read(length)
    #assert s == checkchunk()
    return decompress(self.chunkcache[1][offset:offset + length])
509 509
def delta(self, node):
    """return or calculate a delta between a node and its predecessor"""
    r = self.rev(node)
    if r != self.base(r):
        # the revision is stored as a delta already
        return self.chunk(r)
    # full version on disk: diff it against the previous revision
    return self.diff(self.revision(self.node(r - 1)),
                     self.revision(node))
519 519
def revision(self, node):
    """return an uncompressed revision of a given node

    Reconstructs the full text by applying the delta chain to its
    base, verifies the result against the node hash, and caches the
    reconstructed text for subsequent calls.
    """
    if node == nullid: return ""
    if self.cache and self.cache[0] == node: return self.cache[2]

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # do we have useful data cached?  (a cached text between base and
    # rev lets us skip the start of the delta chain)
    if self.cache and self.cache[1] >= base and self.cache[1] < rev:
        base = self.cache[1]
        text = self.cache[2]
    else:
        text = self.chunk(base)

    bins = []
    for r in xrange(base + 1, rev + 1):
        bins.append(self.chunk(r))

    text = mdiff.patches(text, bins)

    # integrity check: the node id is the hash of text and parents
    p1, p2 = self.parents(node)
    if node != hash(text, p1, p2):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.datafile, rev))

    self.cache = (node, rev, text)
    return text
550 550
def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
    """add a revision to the log

    text - the revision data to add
    transaction - the transaction object used for rollback
    link - the linkrev data to add
    p1, p2 - the parent nodeids of the revision
    d - an optional precomputed delta

    Returns the node id of the (possibly pre-existing) revision.
    """
    if text is None: text = ""
    if p1 is None: p1 = self.tip()
    if p2 is None: p2 = nullid

    node = hash(text, p1, p2)

    # the revision may already be present (same text and parents)
    if node in self.nodemap:
        return node

    n = self.count()
    t = n - 1

    if n:
        base = self.base(t)
        start = self.start(base)
        end = self.end(t)
        if not d:
            prev = self.revision(self.tip())
            d = self.diff(prev, str(text))
        data = compress(d)
        l = len(data[1]) + len(data[0])
        # total length of the delta chain if we append this delta
        dist = end - start + l

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    # (dist/data are only read when n is true, via short-circuit)
    if not n or dist > len(text) * 2:
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = n
    else:
        base = self.base(t)

    offset = 0
    if t >= 0:
        offset = self.end(t)

    # index entry: (offset, length, base, linkrev, p1, p2, node)
    e = (offset, l, base, link, p1, p2, node)

    self.index.append(e)
    self.nodemap[node] = n
    entry = struct.pack(indexformat, *e)

    # register the pre-write file sizes with the transaction so a
    # rollback can truncate back to them
    transaction.add(self.datafile, e[0])
    f = self.opener(self.datafile, "a")
    if data[0]:
        f.write(data[0])
    f.write(data[1])
    transaction.add(self.indexfile, n * len(entry))
    self.opener(self.indexfile, "a").write(entry)

    self.cache = (node, n, text)
    return node
612 612
def ancestor(self, a, b):
    """calculate the least common ancestor of nodes a and b"""
    # calculate the distance of every node from root
    dist = {nullid: 0}
    for i in xrange(self.count()):
        n = self.node(i)
        p1, p2 = self.parents(n)
        dist[n] = max(dist[p1], dist[p2]) + 1

    # traverse ancestors in order of decreasing distance from root
    def ancestors(node):
        # we store negative distances because heap returns smallest member
        h = [(-dist[node], node)]
        seen = {}
        while h:
            d, n = heapq.heappop(h)
            if n not in seen:
                seen[n] = 1
                yield (-d, n)
                for p in self.parents(n):
                    heapq.heappush(h, (-dist[p], p))

    # group the ancestors by generation (equal distance from root)
    def generations(node):
        sg, s = None, {}
        for g, n in ancestors(node):
            if g != sg:
                if sg:
                    yield sg, s
                sg, s = g, {n: 1}
            else:
                s[n] = 1
        yield sg, s

    x = generations(a)
    y = generations(b)
    gx = x.next()
    gy = y.next()

    # increment each ancestor list until it is closer to root than
    # the other, or they match
    while 1:
        #print "ancestor gen %s %s" % (gx[0], gy[0])
        if gx[0] == gy[0]:
            # find the intersection
            i = [n for n in gx[1] if n in gy[1]]
            if i:
                return i[0]
            else:
                #print "next"
                gy = y.next()
                gx = x.next()
        elif gx[0] < gy[0]:
            #print "next y"
            gy = y.next()
        else:
            #print "next x"
            gx = x.next()
672 670
def group(self, nodelist, lookup, infocollect=None):
    """calculate a delta group

    Given a list of changeset revs, return a set of deltas and
    metadata corresponding to nodes. the first delta is
    parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
    have this parent as it has all history before these
    changesets. parent is parent[0]

    lookup maps a node to the linkrev data to send along with it;
    infocollect, if given, is called with each node before its
    delta is generated.  Yields length-prefixed chunks, terminated
    by a zero-length marker.
    """
    revs = [self.rev(n) for n in nodelist]

    # if we don't have any revisions touched by these changesets, bail
    if not revs:
        yield struct.pack(">l", 0)
        return

    # add the parent of the first rev
    p = self.parents(self.node(revs[0]))[0]
    revs.insert(0, self.rev(p))

    # build deltas
    for d in xrange(0, len(revs) - 1):
        a, b = revs[d], revs[d + 1]
        na = self.node(a)
        nb = self.node(b)

        if infocollect is not None:
            infocollect(nb)

        # do we need to construct a new delta?  (only consecutive
        # revisions where b is not a full version can reuse the
        # on-disk delta)
        if a + 1 != b or self.base(b) == b:
            ta = self.revision(na)
            tb = self.revision(nb)
            d = self.diff(ta, tb)
        else:
            d = self.chunk(b)

        p = self.parents(nb)
        meta = nb + p[0] + p[1] + lookup(nb)
        l = struct.pack(">l", len(meta) + len(d) + 4)
        yield l
        yield meta
        yield d

    yield struct.pack(">l", 0)
723 716
def addgroup(self, revs, linkmapper, transaction, unique=0):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    revs yields 80-byte headers (node, p1, p2, changeset) followed
    by the delta payload; linkmapper maps a changeset node to the
    linkrev to record.  The 'unique' flag is currently unused (see
    the commented-out duplicate check below).  Returns the last
    node added.
    """

    #track the base of the current delta log
    r = self.count()
    t = r - 1
    node = nullid

    base = prev = -1
    start = end = measure = 0
    if r:
        base = self.base(t)
        start = self.start(base)
        end = self.end(t)
        measure = self.length(base)
        prev = self.tip()

    # register pre-write sizes so a rollback can truncate the files
    transaction.add(self.datafile, end)
    transaction.add(self.indexfile, r * struct.calcsize(indexformat))
    dfh = self.opener(self.datafile, "a")
    ifh = self.opener(self.indexfile, "a")

    # loop through our set of deltas
    chain = None
    for chunk in revs:
        node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
        link = linkmapper(cs)
        if node in self.nodemap:
            # this can happen if two branches make the same change
            # if unique:
            #    raise RevlogError(_("already have %s") % hex(node[:4]))
            chain = node
            continue
        delta = chunk[80:]

        for p in (p1, p2):
            if not p in self.nodemap:
                raise RevlogError(_("unknown parent %s") % short(p1))

        if not chain:
            # retrieve the parent revision of the delta chain
            chain = p1
            if not chain in self.nodemap:
                raise RevlogError(_("unknown base %s") % short(chain[:4]))

        # full versions are inserted when the needed deltas become
        # comparable to the uncompressed text or when the previous
        # version is not the one we have a delta against. We use
        # the size of the previous full rev as a proxy for the
        # current size.

        if chain == prev:
            tempd = compress(delta)
            cdelta = tempd[0] + tempd[1]

        # (cdelta is only read when chain == prev, via short-circuit)
        if chain != prev or (end - start + len(cdelta)) > measure * 2:
            # flush our writes here so we can read it in revision
            dfh.flush()
            ifh.flush()
            text = self.revision(chain)
            text = self.patches(text, [delta])
            chk = self.addrevision(text, transaction, link, p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error adding group"))
            measure = len(text)
        else:
            # append the delta as-is
            e = (end, len(cdelta), base, link, p1, p2, node)
            self.index.append(e)
            self.nodemap[node] = r
            dfh.write(cdelta)
            ifh.write(struct.pack(indexformat, *e))

        t, r, chain, prev = r, r + 1, node, node
        base = self.base(t)
        start = self.start(base)
        end = self.end(t)

    dfh.close()
    ifh.close()
    return node
809 803
def strip(self, rev, minlink):
    """remove revision rev and everything after it from the revlog

    minlink is the oldest changelog revision we are allowed to
    strip away; rev is advanced past any entries linked to older
    changesets before truncating.
    """
    if self.count() == 0 or rev >= self.count():
        return

    # When stripping away a revision, we need to make sure it
    # does not actually belong to an older changeset.
    # The minlink parameter defines the oldest revision
    # we're allowed to strip away.
    while minlink > self.index[rev][3]:
        rev += 1
        if rev >= self.count():
            return

    # first truncate the files on disk
    end = self.start(rev)
    self.opener(self.datafile, "a").truncate(end)
    end = rev * struct.calcsize(indexformat)
    self.opener(self.indexfile, "a").truncate(end)

    # then reset internal state in memory to forget those revisions
    self.cache = None
    self.chunkcache = None
    for p in self.index[rev:]:
        del self.nodemap[p[6]]
    del self.index[rev:]

    # truncating the lazyindex also truncates the lazymap.
    if isinstance(self.index, lazyindex):
        self.index.trunc(end)
839 833
840 834
841 835 def checksize(self):
842 836 expected = 0
843 837 if self.count():
844 838 expected = self.end(self.count() - 1)
845 839
846 840 try:
847 841 f = self.opener(self.datafile)
848 842 f.seek(0, 2)
849 843 actual = f.tell()
850 844 dd = actual - expected
851 845 except IOError, inst:
852 846 if inst.errno != errno.ENOENT:
853 847 raise
854 848 dd = 0
855 849
856 850 try:
857 851 f = self.opener(self.indexfile)
858 852 f.seek(0, 2)
859 853 actual = f.tell()
860 854 s = struct.calcsize(indexformat)
861 855 i = actual / s
862 856 di = actual - (i * s)
863 857 except IOError, inst:
864 858 if inst.errno != errno.ENOENT:
865 859 raise
866 860 di = 0
867 861
868 862 return (dd, di)
869 863
870 864
General Comments 0
You need to be logged in to leave comments. Login now