Mercurial changeset r4619:5fd7cc89 (default branch) by Matt Mackall —
"encoding: pull fallbackencoding out of localrepo into early parsing".
Diff view of mercurial/cmdutil.py (1202 -> 1206 lines); parent/child and
"Browse files" / "Show More" are page navigation links from the scraped
repository viewer.
@@ -1,1202 +1,1206
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import os, sys, atexit, signal, pdb, traceback, socket, errno, shlex
11 11 import mdiff, bdiff, util, templater, patch, commands, hg, lock, time
12 12 import fancyopts, revlog, version, extensions
13 13
# separator character used in "START:END" revision range specifications
revrangesep = ':'

class UnknownCommand(Exception):
    """Exception raised if command is not in the command table."""
class AmbiguousCommand(Exception):
    """Exception raised if command shortcut matches more than one command."""
class ParseError(Exception):
    """Exception raised on errors in parsing the command line."""
22 22
def runcatch(ui, args):
    """Run dispatch(ui, args), translating every recognized exception
    into a user-facing error message.

    Returns dispatch()'s result on success, a SystemExit's code if a
    command called sys.exit(), or -1 once a handled error has been
    reported.  Unrecognized exceptions print a bug-report banner and
    are re-raised.
    """
    def catchterm(*args):
        raise util.SignalInterrupt

    # route termination signals through SignalInterrupt so they are
    # reported as "killed!" below (SIGBREAK exists on Windows only)
    for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
        num = getattr(signal, name, None)
        if num: signal.signal(num, catchterm)

    try:
        try:
            # enter the debugger before command execution
            if '--debugger' in args:
                pdb.set_trace()
            try:
                return dispatch(ui, args)
            finally:
                # flush pending output even when an exception escapes
                ui.flush()
        except:
            # enter the debugger when we hit an exception
            if '--debugger' in args:
                pdb.post_mortem(sys.exc_info()[2])
            ui.print_exc()
            raise

    except ParseError, inst:
        # inst.args is (command-name-or-None, getopt error) -- see parse()
        if inst.args[0]:
            ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
            commands.help_(ui, inst.args[0])
        else:
            ui.warn(_("hg: %s\n") % inst.args[1])
            commands.help_(ui, 'shortlist')
    except AmbiguousCommand, inst:
        # inst.args is (prefix, sorted list of matching command names)
        ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
                (inst.args[0], " ".join(inst.args[1])))
    except UnknownCommand, inst:
        ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
        commands.help_(ui, 'shortlist')
    except hg.RepoError, inst:
        ui.warn(_("abort: %s!\n") % inst)
    except lock.LockHeld, inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %s') % inst.locker
        else:
            reason = _('lock held by %s') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
    except lock.LockUnavailable, inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except revlog.RevlogError, inst:
        ui.warn(_("abort: %s!\n") % inst)
    except util.SignalInterrupt:
        ui.warn(_("killed!\n"))
    except KeyboardInterrupt:
        try:
            ui.warn(_("interrupted!\n"))
        except IOError, inst:
            # the warn itself can hit a closed pipe; only debug-report it
            if inst.errno == errno.EPIPE:
                if ui.debugflag:
                    ui.warn(_("\nbroken pipe\n"))
            else:
                raise
    except socket.error, inst:
        ui.warn(_("abort: %s\n") % inst[1])
    except IOError, inst:
        if hasattr(inst, "code"):
            # presumably an HTTP error object (has .code) -- confirm
            ui.warn(_("abort: %s\n") % inst)
        elif hasattr(inst, "reason"):
            # presumably a URL error object (has .reason) -- confirm
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except: # it might be anything, for example a string
                reason = inst.reason
            ui.warn(_("abort: error: %s\n") % reason)
        elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
            # broken pipes are only reported in debug mode
            if ui.debugflag:
                ui.warn(_("broken pipe\n"))
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
            else:
                ui.warn(_("abort: %s\n") % inst.strerror)
        else:
            raise
    except OSError, inst:
        if getattr(inst, "filename", None):
            ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
        else:
            ui.warn(_("abort: %s\n") % inst.strerror)
    except util.UnexpectedOutput, inst:
        # inst is (message, offending output)
        ui.warn(_("abort: %s") % inst[0])
        if not isinstance(inst[1], basestring):
            ui.warn(" %r\n" % (inst[1],))
        elif not inst[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst[1]))
    except util.Abort, inst:
        ui.warn(_("abort: %s\n") % inst)
    except TypeError, inst:
        # was this an argument error?
        tb = traceback.extract_tb(sys.exc_info()[2])
        if len(tb) > 2: # no
            raise
        ui.debug(inst, "\n")
        # NOTE(review): 'cmd' is not defined anywhere in this function's
        # scope, so reaching the next two lines raises NameError instead
        # of printing the intended message -- confirm against dispatch()
        ui.warn(_("%s: invalid arguments\n") % cmd)
        commands.help_(ui, cmd)
    except SystemExit, inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass the exit code to the caller.
        return inst.code
    except:
        ui.warn(_("** unknown exception encountered, details follow\n"))
        ui.warn(_("** report bug details to "
                  "http://www.selenic.com/mercurial/bts\n"))
        ui.warn(_("** or mercurial@selenic.com\n"))
        ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
                % version.get_version())
        raise

    return -1
142 142
def findpossible(ui, cmd):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}
    strict = ui.config("ui", "strict")
    for entry in commands.table.keys():
        # table keys look like "^name|alias1|alias2"; '^' marks core commands
        aliases = entry.lstrip("^").split("|")
        if cmd in aliases:
            match = cmd
        else:
            match = None
            if not strict:
                # accept an unambiguous prefix of any alias
                for alias in aliases:
                    if alias.startswith(cmd):
                        match = alias
                        break
        if match is None:
            continue
        if aliases[0].startswith("debug") or match.startswith("debug"):
            debugchoice[match] = (aliases, commands.table[entry])
        else:
            choice[match] = (aliases, commands.table[entry])

    # debug commands are offered only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice
171 171
def findcmd(ui, cmd):
    """Return (aliases, command table entry) for command string.

    Raises AmbiguousCommand if cmd is a prefix of several commands and
    UnknownCommand if it matches nothing.
    """
    choice = findpossible(ui, cmd)

    # an exact match always wins over prefix matches
    # (use 'in' rather than the deprecated dict.has_key())
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise UnknownCommand(cmd)
188 188
def findrepo():
    """Walk upward from the current directory looking for one that
    contains a '.hg' subdirectory; return its path, or None if the
    filesystem root is reached first."""
    d = os.getcwd()
    while True:
        if os.path.isdir(os.path.join(d, ".hg")):
            return d
        parent = os.path.dirname(d)
        if parent == d:
            # dirname() is a fixed point only at the root
            return None
        d = parent
197 197
def parse(ui, args):
    """Parse the command line into (cmd, func, args, options, cmdoptions).

    cmd is the canonical command name (None when no command was given),
    func the command's implementation (table entry item 0), args the
    remaining positional arguments, options the global option values,
    and cmdoptions the command-specific option values.

    Raises ParseError on bad options, and (via findcmd) UnknownCommand
    or AmbiguousCommand for unrecognized command names.
    """
    options = {}
    cmdoptions = {}

    try:
        # peel off the global options that appear before the command name
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(None, inst)

    if args:
        cmd, args = args[0], args[1:]
        aliases, i = findcmd(ui, cmd)
        # canonicalize to the first alias in the table entry
        cmd = aliases[0]
        # the [defaults] hgrc section can prepend default arguments
        defaults = ui.config("defaults", cmd)
        if defaults:
            args = shlex.split(defaults) + args
        c = list(i[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in commands.globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        args = fancyopts.fancyopts(args, c, cmdoptions)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(cmd, inst)

    # separate global options back out
    for o in commands.globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and i[0] or None, args, options, cmdoptions)
235 235
def parseconfig(config):
    """Parse the --config options from the command line.

    Each item must have the form 'section.name=value'; returns a list
    of (section, name, value) tuples.  Raises util.Abort on any
    malformed item.
    """
    items = []
    for cfg in config:
        wellformed = '=' in cfg
        if wellformed:
            name, value = cfg.split('=', 1)
            wellformed = '.' in name
            if wellformed:
                section, name = name.split('.', 1)
                # both halves of "section.name" must be non-empty
                wellformed = bool(section) and bool(name)
        if not wellformed:
            raise util.Abort(_('malformed --config option: %s') % cfg)
        items.append((section, name, value))
    return items
249 249
def earlygetopt(aliases, args):
    """Scan args for any of the given option aliases before real option
    parsing happens, returning the option's value or None.

    Only arguments before a '--' separator are considered.  Both the
    '--opt value' and '--opt=value' spellings are recognized; an option
    appearing as the very last argument (no value following) yields
    None instead of raising IndexError.
    """
    if "--" in args:
        args = args[:args.index("--")]
    for opt in aliases:
        if opt in args:
            pos = args.index(opt)
            # guard: option given with no value after it
            if pos + 1 < len(args):
                return args[pos + 1]
            return None
        # also accept the '--opt=value' spelling for long options
        if opt.startswith("--"):
            prefix = opt + "="
            for arg in args:
                if arg.startswith(prefix):
                    return arg[len(prefix):]
    return None
257 257
def dispatch(ui, args):
    """Parse the command line and run the requested command.

    Handles the early options (--cwd, -R/--repository/--repo), loads
    extensions via a repo-local ui, applies the global options,
    instantiates the repository when the command needs one, and hands
    the resulting thunk to runcommand() for (optionally profiled)
    execution.
    """
    # check for cwd first
    cwd = earlygetopt(['--cwd'], args)
    if cwd:
        os.chdir(cwd)

    # read the local repository .hgrc into a local ui object
    # this will trigger its extensions to load
    path = earlygetopt(["-R", "--repository", "--repo"], args)
    if not path:
        path = findrepo() or ""
    lui = ui
    if path:
        try:
            lui = commands.ui.ui(parentui=ui)
            lui.readconfig(os.path.join(path, ".hg", "hgrc"))
        except IOError:
            # no readable .hg/hgrc; keep using the global ui
            pass

    extensions.loadall(lui)

    # check for fallback encoding
    fallback = lui.config('ui', 'fallbackencoding')
    if fallback:
        util._fallbackencoding = fallback

    cmd, func, args, options, cmdoptions = parse(ui, args)

    if options["encoding"]:
        util._encoding = options["encoding"]
    if options["encodingmode"]:
        util._encodingmode = options["encodingmode"]
    if options["time"]:
        def get_times():
            t = os.times()
            if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
                t = (t[0], t[1], t[2], t[3], time.clock())
            return t
        s = get_times()
        def print_time():
            t = get_times()
            ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
                (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
        # print timing at interpreter exit so the command's own time is included
        atexit.register(print_time)

    ui.updateopts(options["verbose"], options["debug"], options["quiet"],
                 not options["noninteractive"], options["traceback"],
                 parseconfig(options["config"]))

    if options['help']:
        return commands.help_(ui, cmd, options['version'])
    elif options['version']:
        return commands.version_(ui)
    elif not cmd:
        return commands.help_(ui, 'shortlist')

    if cmd not in commands.norepo.split():
        # this command needs a repository object
        repo = None
        try:
            repo = hg.repository(ui, path=path)
            ui = repo.ui
            if not repo.local():
                raise util.Abort(_("repository '%s' is not local") % path)
        except hg.RepoError:
            # commands in optionalrepo tolerate a missing repository
            if cmd not in commands.optionalrepo.split():
                raise
        d = lambda: func(ui, repo, *args, **cmdoptions)
    else:
        d = lambda: func(ui, *args, **cmdoptions)

    return runcommand(ui, options, d)
def runcommand(ui, options, cmdfunc):
    """Run cmdfunc(), honoring the --profile and --lsprof global options.

    Without profiling this simply returns cmdfunc()'s result.  With
    --profile the call runs under hotshot (the report is produced after
    the call, even on error); with --lsprof it runs under mercurial's
    lsprof wrapper, printing the top entries to stderr.
    """
    if not options['profile'] and not options['lsprof']:
        return cmdfunc()

    if options['profile']:
        import hotshot, hotshot.stats
        prof = hotshot.Profile("hg.prof")
        try:
            try:
                return prof.runcall(cmdfunc)
            except:
                try:
                    ui.warn(_('exception raised - generating '
                              'profile anyway\n'))
                except:
                    pass
                raise
        finally:
            # emit the report whether or not the command succeeded
            prof.close()
            report = hotshot.stats.load("hg.prof")
            report.strip_dirs()
            report.sort_stats('time', 'calls')
            report.print_stats(40)

    # --lsprof path
    try:
        from mercurial import lsprof
    except ImportError:
        raise util.Abort(_(
            'lsprof not available - install from '
            'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
    profiler = lsprof.Profiler()
    profiler.enable(subcalls=True)
    try:
        return cmdfunc()
    finally:
        profiler.disable()
        stats = lsprof.Stats(profiler.getstats())
        stats.sort()
        stats.pprint(top=10, file=sys.stderr, climit=5)
363 367
def bail_if_changed(repo):
    """Abort if the working directory has uncommitted changes
    (anything modified, added, removed or deleted)."""
    for filelist in repo.status()[:4]:
        if filelist:
            raise util.Abort(_("outstanding uncommitted changes"))
368 372
369 373 def logmessage(opts):
370 374 """ get the log message according to -m and -l option """
371 375 message = opts['message']
372 376 logfile = opts['logfile']
373 377
374 378 if message and logfile:
375 379 raise util.Abort(_('options --message and --logfile are mutually '
376 380 'exclusive'))
377 381 if not message and logfile:
378 382 try:
379 383 if logfile == '-':
380 384 message = sys.stdin.read()
381 385 else:
382 386 message = open(logfile).read()
383 387 except IOError, inst:
384 388 raise util.Abort(_("can't read commit message '%s': %s") %
385 389 (logfile, inst.strerror))
386 390 return message
387 391
def setremoteconfig(ui, opts):
    "copy remote options to ui tree"
    # only non-empty values override the configuration
    for key in ('ssh', 'remotecmd'):
        if opts.get(key):
            ui.setconfig("ui", key, opts[key])
394 398
def parseurl(url, revs):
    '''parse url#branch, returning url, branch + revs

    With no fragment, revs is passed through (empty list becomes None).
    With a fragment, the fragment is appended to revs; revs may be
    None, which is treated as an empty list rather than crashing.
    '''
    if '#' not in url:
        return url, (revs or None)

    url, rev = url.split('#', 1)
    # tolerate revs=None so a fragment can always be appended
    return url, (revs or []) + [rev]
403 407
def revpair(repo, revs):
    '''return pair of nodes, given list of revisions. second item can
    be None, meaning use working dir.'''

    def revfix(repo, val, defval):
        # an empty spec (but not the string "0") falls back to defval
        # when one is given; the result is resolved to a node
        if not val and val != 0 and defval is not None:
            val = defval
        return repo.lookup(val)

    if not revs:
        # no revisions given: working dir against its first parent
        return repo.dirstate.parents()[0], None
    end = None
    if len(revs) == 1:
        if revrangesep in revs[0]:
            # "a:b" range form; empty endpoints default to 0 and tip
            start, end = revs[0].split(revrangesep, 1)
            start = revfix(repo, start, 0)
            end = revfix(repo, end, repo.changelog.count() - 1)
        else:
            start = revfix(repo, revs[0], None)
    elif len(revs) == 2:
        # two explicit revisions; range syntax is not allowed here
        if revrangesep in revs[0] or revrangesep in revs[1]:
            raise util.Abort(_('too many revisions specified'))
        start = revfix(repo, revs[0], None)
        end = revfix(repo, revs[1], None)
    else:
        raise util.Abort(_('too many revisions specified'))
    return start, end
431 435
def revrange(repo, revs):
    """Return a list of revision numbers from a list of revision
    specifications.  (Despite the historical docstring, the result is a
    list of integers, deduplicated in first-seen order.)"""

    def revfix(repo, val, defval):
        # an empty spec (but not the string "0") maps to defval;
        # otherwise resolve the spec to a changelog revision number
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    # seen: dedupe set; l: result list, preserving first-seen order
    seen, l = {}, []
    for spec in revs:
        if revrangesep in spec:
            # "a:b" expands to every rev between the endpoints inclusive,
            # walking backwards when a > b
            start, end = spec.split(revrangesep, 1)
            start = revfix(repo, start, 0)
            end = revfix(repo, end, repo.changelog.count() - 1)
            step = start > end and -1 or 1
            for rev in xrange(start, end+step, step):
                if rev in seen:
                    continue
                seen[rev] = 1
                l.append(rev)
        else:
            rev = revfix(repo, spec, None)
            if rev in seen:
                continue
            seen[rev] = 1
            l.append(rev)

    return l
460 464
def make_filename(repo, pat, node,
                  total=None, seqno=None, revwidth=None, pathname=None):
    '''expand %-escapes in an output file name pattern.

    Supported escapes: %% literal percent, %H/%h/%R full/short node and
    rev (only when node is given), %r zero-padded rev (needs revwidth),
    %N total patch count, %n (padded) sequence number, %s/%d/%p
    basename/dirname/full pathname, %b basename of the repo root.

    Raises util.Abort for an unknown escape or a trailing bare '%'.
    '''
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    if node:
        expander.update(node_expander)
    if node and revwidth is not None:
        expander['r'] = (lambda:
                str(repo.changelog.rev(node)).zfill(revwidth))
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # with both available, pad the sequence number to the total's width
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    newname = []
    patlen = len(pat)
    i = 0
    while i < patlen:
        c = pat[i]
        if c == '%':
            i += 1
            # validate the escape explicitly rather than wrapping the
            # whole loop in "except KeyError": a KeyError raised inside
            # an expander callable must not be misreported as a bad
            # format spec, and a trailing bare '%' used to raise an
            # uncaught IndexError
            if i >= patlen or pat[i] not in expander:
                if i < patlen:
                    spec = pat[i]
                else:
                    spec = ''
                raise util.Abort(
                    _("invalid format spec '%%%s' in output file name") % spec)
            c = expander[pat[i]]()
        newname.append(c)
        i += 1
    return ''.join(newname)
505 509
def make_file(repo, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return an open file (or file-like object) for the given pattern.

    An empty pattern or '-' maps to stdout/stdin depending on mode; a
    file-like object passed as pat is handed back unchanged when its
    read/write capability matches the mode; anything else is expanded
    through make_filename() and opened.
    """
    writing = 'w' in mode
    if not pat or pat == '-':
        if writing:
            return sys.stdout
        return sys.stdin
    if writing and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    name = make_filename(repo, pat, node, total, seqno, revwidth, pathname)
    return open(name, mode)
517 521
def matchpats(repo, pats=None, opts=None, globbed=False, default=None):
    """Build a matcher for pats relative to the repository's cwd.

    Returns util.cmdmatcher()'s (files, matchfn, anypats) triple,
    honoring the --include/--exclude lists found in opts.
    """
    # None instead of mutable [] / {} defaults avoids the shared
    # default-argument pitfall; explicit None callers keep working
    if opts is None:
        opts = {}
    cwd = repo.getcwd()
    return util.cmdmatcher(repo.root, cwd, pats or [], opts.get('include'),
                           opts.get('exclude'), globbed=globbed,
                           default=default)
523 527
def walk(repo, pats=None, opts=None, node=None, badmatch=None, globbed=False,
         default=None):
    """Walk the working directory (or the given node), yielding
    (src, filename, display-path, exact-match?) for every matched file.
    """
    # normalize locally so the shared mutable defaults are never reused
    # and so matchpats always receives real containers
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    files, matchfn, anypats = matchpats(repo, pats, opts, globbed=globbed,
                                        default=default)
    # files listed explicitly on the command line count as exact matches
    exact = dict.fromkeys(files)
    cwd = repo.getcwd()
    for src, fn in repo.walk(node=node, files=files, match=matchfn,
                             badmatch=badmatch):
        yield src, fn, repo.pathto(fn, cwd), fn in exact
533 537
def findrenames(repo, added=None, removed=None, threshold=0.5):
    '''find renamed files -- yields (before, after, score) tuples'''
    # score is the fraction (0..1] of bytes the two versions share;
    # only the best-scoring removed file at or above threshold is
    # reported for each added file
    if added is None or removed is None:
        added, removed = repo.status()[1:3]
    ctx = repo.changectx()
    for a in added:
        aa = repo.wread(a)
        bestname, bestscore = None, threshold
        for r in removed:
            rr = ctx.filectx(r).data()

            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            alines = mdiff.splitnewlines(aa)
            matches = bdiff.blocks(aa, rr)
            for x1,x2,y1,y2 in matches:
                for line in alines[x1:x2]:
                    equal += len(line)

            lengths = len(aa) + len(rr)
            if lengths:
                # 2*shared/total bytes: 1.0 means identical contents
                myscore = equal*2.0 / lengths
                if myscore >= bestscore:
                    bestname, bestscore = r, myscore
        if bestname:
            yield bestname, a, bestscore
561 565
def addremove(repo, pats=[], opts={}, wlock=None, dry_run=None,
              similarity=None):
    """Schedule files for addition or removal based on working dir state.

    Unknown files ('?' dirstate state) present on disk are added,
    tracked files missing from disk are removed, and -- when
    similarity > 0 -- added/removed pairs at least that similar are
    recorded as renames.  With dry_run only the messages are printed.
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    add, remove = [], []
    # abs path -> (rel path, exact match?), for the rename messages below
    mapping = {}
    for src, abs, rel, exact in walk(repo, pats, opts):
        target = repo.wjoin(abs)
        if src == 'f' and repo.dirstate.state(abs) == '?':
            # on disk but unknown to the dirstate: schedule for add
            add.append(abs)
            mapping[abs] = rel, exact
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        if repo.dirstate.state(abs) != 'r' and not util.lexists(target):
            # tracked (and not already removed) but gone from disk
            remove.append(abs)
            mapping[abs] = rel, exact
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
    if not dry_run:
        repo.add(add, wlock=wlock)
        repo.remove(remove, wlock=wlock)
    if similarity > 0:
        for old, new, score in findrenames(repo, add, remove, similarity):
            oldrel, oldexact = mapping[old]
            newrel, newexact = mapping[new]
            if repo.ui.verbose or not oldexact or not newexact:
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (oldrel, newrel, score * 100))
            if not dry_run:
                repo.copy(old, new, wlock=wlock)
595 599
def service(opts, parentfn=None, initfn=None, runfn=None):
    '''Run a command as a service.

    Daemonization protocol: the parent process re-execs itself with
    --daemon-pipefds and waits for one byte on a pipe, which the child
    writes once it has fully detached.
    '''
    if opts['daemon'] and not opts['daemon_pipefds']:
        # parent half: spawn the detached child and wait for its signal
        rfd, wfd = os.pipe()
        args = sys.argv[:]
        args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
        # P_DETACH exists on Windows only; getattr makes it a no-op elsewhere
        pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                         args[0], args)
        os.close(wfd)
        os.read(rfd, 1)
        if parentfn:
            return parentfn(pid)
        else:
            os._exit(0)

    if initfn:
        initfn()

    if opts['pid_file']:
        fp = open(opts['pid_file'], 'w')
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # child half: detach from the session, notify the parent, and
        # redirect the standard streams to the null device
        rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
        os.close(rfd)
        try:
            os.setsid()
        except AttributeError:
            # setsid is unavailable on this platform
            pass
        os.write(wfd, 'y')
        os.close(wfd)
        sys.stdout.flush()
        sys.stderr.flush()
        fd = os.open(util.nulldev, os.O_RDWR)
        if fd != 0: os.dup2(fd, 0)
        if fd != 1: os.dup2(fd, 1)
        if fd != 2: os.dup2(fd, 2)
        if fd not in (0, 1, 2): os.close(fd)

    if runfn:
        return runfn()
639 643
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, buffered):
        # patch: match function for --patch diff output, or False for none
        # buffered: when true, show() collects output per-rev for flush()
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.header = {}    # rev -> rendered header text (buffered mode)
        self.hunk = {}      # rev -> rendered changeset text (buffered mode)
        self.lastheader = None   # dedupes consecutive identical headers

    def flush(self, rev):
        # emit buffered output collected for rev; returns 1 if a hunk
        # was pending for that rev, else 0
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def show(self, rev=0, changenode=None, copies=(), **props):
        # render one changeset; in buffered mode capture the output into
        # self.hunk[rev] instead of writing immediately
        if self.buffered:
            self.ui.pushbuffer()
            self._show(rev, changenode, copies, props)
            self.hunk[rev] = self.ui.popbuffer()
        else:
            self._show(rev, changenode, copies, props)

    def _show(self, rev, changenode, copies, props):
        '''show a single changeset or file revision'''
        log = self.repo.changelog
        # callers may pass either a rev or a node; derive the other
        if changenode is None:
            changenode = log.node(rev)
        elif not rev:
            rev = log.rev(changenode)

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        # changes is (manifest node, user, date, files, description, extra)
        changes = log.read(changenode)
        date = util.datestr(changes[2])
        extra = changes[5]
        branch = extra.get("branch")

        hexfunc = self.ui.debugflag and hex or short

        parents = log.parentrevs(rev)
        if not self.ui.debugflag:
            # hide a trivial sole parent (simply the previous revision)
            if parents[1] == nullrev:
                if parents[0] >= rev - 1:
                    parents = []
                else:
                    parents = [parents[0]]
        parents = [(p, hexfunc(log.node(p))) for p in parents]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))

        # don't show the default branch name
        if branch != 'default':
            branch = util.tolocal(branch)
            self.ui.write(_("branch: %s\n") % branch)
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent)

        if self.ui.debugflag:
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(changes[0]), hex(changes[0])))
        self.ui.write(_("user: %s\n") % changes[1])
        self.ui.write(_("date: %s\n") % date)

        if self.ui.debugflag:
            # debug mode lists modified/added/removed files separately
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
        elif changes[3] and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(changes[3]))
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies))

        if extra and self.ui.debugflag:
            extraitems = extra.items()
            extraitems.sort()
            for key, value in extraitems:
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')))

        description = changes[4].strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"))
                self.ui.write(description)
                self.ui.write("\n\n")
            else:
                # non-verbose: only the first line of the description
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0])
        self.ui.write("\n")

        self.showpatch(changenode)

    def showpatch(self, node):
        # emit a diff against the first parent when --patch was requested
        if self.patch:
            prev = self.repo.changelog.parents(node)[0]
            patch.diff(self.repo, prev, node, match=self.patch, fp=self.ui)
            self.ui.write("\n")
755 759
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, buffered)
        filters = templater.common_filters.copy()
        # full nodes in debug mode, 12-character short form otherwise
        filters['formatnode'] = (ui.debugflag and (lambda x: x)
                                 or (lambda x: x[:12]))
        self.t = templater.templater(mapfile, filters,
                                     cache={
                                         'parent': '{rev}:{node|formatnode} ',
                                         'manifest': '{rev}:{node|formatnode}',
                                         'filecopy': '{name} ({source})'})

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _show(self, rev, changenode, copies, props):
        '''show a single changeset or file revision'''
        log = self.repo.changelog
        # callers may pass either a rev or a node; derive the other
        if changenode is None:
            changenode = log.node(rev)
        elif not rev:
            rev = log.rev(changenode)

        changes = log.read(changenode)

        def showlist(name, values, plural=None, **args):
            '''expand set of values.
            name is name of key in template map.
            values is list of strings or dicts.
            plural is plural of name, if not simply name + 's'.

            expansion works like this, given name 'foo'.

            if values is empty, expand 'no_foos'.

            if 'foo' not in template map, return values as a string,
            joined by space.

            expand 'start_foos'.

            for each value, expand 'foo'. if 'last_foo' in template
            map, expand it instead of 'foo' for last key.

            expand 'end_foos'.
            '''
            if plural: names = plural
            else: names = name + 's'
            if not values:
                noname = 'no_' + names
                if noname in self.t:
                    yield self.t(noname, **args)
                return
            if name not in self.t:
                if isinstance(values[0], str):
                    yield ' '.join(values)
                else:
                    for v in values:
                        yield dict(v, **args)
                return
            startname = 'start_' + names
            if startname in self.t:
                yield self.t(startname, **args)
            vargs = args.copy()
            def one(v, tag=name):
                try:
                    vargs.update(v)
                except (AttributeError, ValueError):
                    try:
                        # v may be a sequence of (key, value) pairs
                        for a, b in v:
                            vargs[a] = b
                    except ValueError:
                        # or a plain scalar value
                        vargs[name] = v
                return self.t(tag, **vargs)
            lastname = 'last_' + name
            if lastname in self.t:
                last = values.pop()
            else:
                last = None
            for v in values:
                yield one(v)
            if last is not None:
                yield one(last, tag=lastname)
            endname = 'end_' + names
            if endname in self.t:
                yield self.t(endname, **args)

        def showbranches(**args):
            # the default branch is conventionally not displayed
            branch = changes[5].get("branch")
            if branch != 'default':
                branch = util.tolocal(branch)
                return showlist('branch', [branch], plural='branches', **args)

        def showparents(**args):
            parents = [[('rev', log.rev(p)), ('node', hex(p))]
                       for p in log.parents(changenode)
                       if self.ui.debugflag or p != nullid]
            # hide a trivial sole parent (simply the previous revision)
            if (not self.ui.debugflag and len(parents) == 1 and
                parents[0][0][1] == rev - 1):
                return
            return showlist('parent', parents, **args)

        def showtags(**args):
            return showlist('tag', self.repo.nodetags(changenode), **args)

        def showextras(**args):
            extras = changes[5].items()
            extras.sort()
            for key, value in extras:
                args = args.copy()
                args.update(dict(key=key, value=value))
                yield self.t('extra', **args)

        def showcopies(**args):
            c = [{'name': x[0], 'source': x[1]} for x in copies]
            return showlist('file_copy', c, plural='file_copies', **args)

        if self.ui.debugflag:
            # debug mode distinguishes modified/added/removed files
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            def showfiles(**args):
                return showlist('file', files[0], **args)
            def showadds(**args):
                return showlist('file_add', files[1], **args)
            def showdels(**args):
                return showlist('file_del', files[2], **args)
            def showmanifest(**args):
                args = args.copy()
                args.update(dict(rev=self.repo.manifest.rev(changes[0]),
                                 node=hex(changes[0])))
                return self.t('manifest', **args)
        else:
            def showfiles(**args):
                return showlist('file', changes[3], **args)
            showadds = ''
            showdels = ''
            showmanifest = ''

        defprops = {
            'author': changes[1],
            'branches': showbranches,
            'date': changes[2],
            'desc': changes[4],
            'file_adds': showadds,
            'file_dels': showdels,
            'files': showfiles,
            'file_copies': showcopies,
            'manifest': showmanifest,
            'node': hex(changenode),
            'parents': showparents,
            'rev': rev,
            'tags': showtags,
            'extras': showextras,
            }
        props = props.copy()
        props.update(defprops)

        try:
            # pick the most specific header template available for the
            # current verbosity level
            if self.ui.debugflag and 'header_debug' in self.t:
                key = 'header_debug'
            elif self.ui.quiet and 'header_quiet' in self.t:
                key = 'header_quiet'
            elif self.ui.verbose and 'header_verbose' in self.t:
                key = 'header_verbose'
            elif 'header' in self.t:
                key = 'header'
            else:
                key = ''
            if key:
                h = templater.stringify(self.t(key, **props))
                if self.buffered:
                    self.header[rev] = h
                else:
                    self.ui.write(h)
            # likewise for the changeset body template
            if self.ui.debugflag and 'changeset_debug' in self.t:
                key = 'changeset_debug'
            elif self.ui.quiet and 'changeset_quiet' in self.t:
                key = 'changeset_quiet'
            elif self.ui.verbose and 'changeset_verbose' in self.t:
                key = 'changeset_verbose'
            else:
                key = 'changeset'
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(changenode)
        except KeyError, inst:
            raise util.Abort(_("%s: no key named '%s'") % (self.t.mapfile,
                                                           inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
946 950
def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    patch = False
    if opts.get('patch'):
        # restrict displayed diffs to files accepted by matchfn;
        # with no matchfn, show diffs for all files
        patch = matchfn or util.always

    tmpl = opts.get('template')
    mapfile = None
    if tmpl:
        tmpl = templater.parsestring(tmpl, quoted=False)
    else:
        mapfile = opts.get('style')
        # ui settings
        if not mapfile:
            tmpl = ui.config('ui', 'logtemplate')
            if tmpl:
                tmpl = templater.parsestring(tmpl)
            else:
                mapfile = ui.config('ui', 'style')

    if tmpl or mapfile:
        if mapfile:
            # a bare style name (no directory part) is resolved against
            # the installed template path, preferring map-cmdline.<name>
            if not os.path.split(mapfile)[0]:
                mapname = (templater.templatepath('map-cmdline.' + mapfile)
                           or templater.templatepath(mapfile))
                if mapname: mapfile = mapname
        try:
            t = changeset_templater(ui, repo, patch, mapfile, buffered)
        except SyntaxError, inst:
            raise util.Abort(inst.args[0])
        if tmpl: t.use_template(tmpl)
        return t
    return changeset_printer(ui, repo, patch, buffered)
990 994
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec.

    Walks all revisions, remembering those whose commit date matches,
    and returns (as a string) the first matching rev reported in
    iteration order.  Raises util.Abort when nothing matches.
    """
    df = util.matchdate(date + " to " + date)
    get = util.cachefunc(lambda r: repo.changectx(r).changeset())
    changeiter, matchfn = walkchangerevs(ui, repo, [], get, {'rev':None})
    results = {}
    for st, rev, fns in changeiter:
        if st == 'add':
            d = get(rev)[2]
            if df(d[0]):
                results[rev] = d
        elif st == 'iter':
            if rev in results:
                # wrap the message in _() for translation, consistent
                # with every other user-visible message in this module
                ui.status(_("Found revision %s from %s\n") %
                          (rev, util.datestr(results[rev])))
                return str(rev)

    raise util.Abort(_("revision matching date not found"))
1009 1013
def walkchangerevs(ui, repo, pats, change, opts):
    '''Iterate over files and the revs they changed in.

    Callers most commonly need to iterate backwards over the history
    it is interested in.  Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order.  Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an (iterator, matchfn) tuple. The iterator
    yields 3-tuples. They will be of one of the following forms:

    "window", incrementing, lastrev: stepping through a window,
    positive if walking forwards through revs, last rev in the
    sequence iterated over - use to reset state for the current window

    "add", rev, fns: out-of-order traversal of the given file names
    fns, which changed during revision rev - use to gather data for
    possible display

    "iter", rev, None: in-order traversal of the revs earlier iterated
    over with "add" - use to display data'''

    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        # yield (position, size) pairs covering [start, end), doubling
        # the window size each step up to sizelimit; works in either
        # direction depending on whether start < end
        if start < end:
            while start < end:
                yield start, min(windowsize, end-start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start-end-1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    files, matchfn, anypats = matchpats(repo, pats, opts)
    follow = opts.get('follow') or opts.get('follow_first')

    if repo.changelog.count() == 0:
        return [], matchfn

    # default revision range: working parent back to 0 when following,
    # otherwise tip back to 0
    if follow:
        defrange = '%s:0' % repo.changectx().rev()
    else:
        defrange = 'tip:0'
    revs = revrange(repo, opts['rev'] or [defrange])
    wanted = {}
    # patterns (or --removed) force the slow path: scanning every changeset
    slowpath = anypats or opts.get('removed')
    fncache = {}

    if not slowpath and not files:
        # No files, no patterns.  Display all revs.
        wanted = dict.fromkeys(revs)
    copies = []
    if not slowpath:
        # Only files, no patterns.  Check the history of each file.
        def filerevgen(filelog, node):
            cl_count = repo.changelog.count()
            if node is None:
                last = filelog.count() - 1
            else:
                last = filelog.rev(node)
            for i, window in increasing_windows(last, nullrev):
                revs = []
                for j in xrange(i - window, i + 1):
                    n = filelog.node(j)
                    revs.append((filelog.linkrev(n),
                                 follow and filelog.renamed(n)))
                revs.reverse()
                for rev in revs:
                    # only yield rev for which we have the changelog, it can
                    # happen while doing "hg log" during a pull or commit
                    if rev[0] < cl_count:
                        yield rev
        def iterfiles():
            # explicit files first, then copy sources discovered while
            # following renames (copies grows during the loop below)
            for filename in files:
                yield filename, None
            for filename_node in copies:
                yield filename_node
        minrev, maxrev = min(revs), max(revs)
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            # A zero count may be a directory or deleted file, so
            # try to find matching entries on the slow path.
            if filelog.count() == 0:
                slowpath = True
                break
            for rev, copied in filerevgen(filelog, node):
                if rev <= maxrev:
                    if rev < minrev:
                        break
                    fncache.setdefault(rev, [])
                    fncache[rev].append(file_)
                    wanted[rev] = 1
                    if follow and copied:
                        copies.append(copied)
    if slowpath:
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'file names'))

        # The slow path checks files modified in every changeset.
        def changerevgen():
            for i, window in increasing_windows(repo.changelog.count()-1,
                                                nullrev):
                for j in xrange(i - window, i + 1):
                    yield j, change(j)[3]

        for rev, changefiles in changerevgen():
            matches = filter(matchfn, changefiles)
            if matches:
                fncache[rev] = matches
                wanted[rev] = 1

    class followfilter:
        # stateful predicate: tracks the set of "roots" reachable from
        # the first rev seen, in whichever direction revs arrive
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = []
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.append(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.append(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.extend(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.extend(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo.changelog.rev(repo.lookup(rev))
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop-1, -1):
            if ff.match(x) and x in wanted:
                del wanted[x]

    def iterate():
        if follow and not files:
            # following with no explicit files: filter revs to those on
            # the ancestry/descendancy path of the starting rev
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                if ff.match(rev) and rev in wanted:
                    return True
                return False
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            yield 'window', revs[0] < revs[-1], revs[-1]
            nrevs = [rev for rev in revs[i:i+window] if want(rev)]
            srevs = list(nrevs)
            srevs.sort()
            for rev in srevs:
                fns = fncache.get(rev)
                if not fns:
                    def fns_generator():
                        for f in change(rev)[3]:
                            if matchfn(f):
                                yield f
                    fns = fns_generator()
                yield 'add', rev, fns
            for rev in nrevs:
                yield 'iter', rev, None
    return iterate(), matchfn
@@ -1,1969 +1,1965
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util, extensions
14 14
class localrepository(repo.repository):
    # protocol capabilities this repository advertises to peers
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk requirements this class knows how to handle
    supported = ('revlogv1', 'store')
18 18
    def __del__(self):
        # drop the transaction reference on teardown so it is not kept
        # alive past the repository object
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        Raises repo.RepoError if the repository is missing (create=0),
        already exists (create=1), or lists an unsupported requirement.
        """
        repo.repository.__init__(self)
        self.path = path
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)      # opener rooted at .hg
        self.wopener = util.opener(self.root)     # opener rooted at working dir

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: pre-requirements repository
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # legacy layout: store files live directly in .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # missing/unreadable hgrc is not an error
            pass

        # honor per-repo fallback encoding for non-UTF-8 metadata
        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # caches populated lazily; see tags(), branchtags(), _filter()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None
92 88
    def __getattr__(self, name):
        """Instantiate expensive attributes (changelog, manifest,
        dirstate) lazily on first access and cache them on self."""
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # the store opener defaults to the changelog's revlog version
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # deliberate side effect: force the changelog into existence
            # first so sopener.defversion is set before opening the manifest
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
107 103
108 104 def url(self):
109 105 return 'file:' + self.root
110 106
    def hook(self, name, throw=False, **args):
        """Run every configured hook matching 'name'.

        Supports in-process python hooks (a callable, or a
        "python:mod.func" string) and external shell hooks.  With
        throw=True a failing hook raises util.Abort; otherwise failures
        only produce warnings.  Returns the last truthy hook result.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                # resolve "module.attr.path" to a callable
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args are exported as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # hooks match on the part of the key before any "." suffix,
        # so "name" and "name.label" both fire for 'name'
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
195 191
    tag_disallowed = ':\r\n'  # characters that may not appear in tag names

    def _tag(self, name, node, message, local, user, date, parent=None):
        """Shared tagging implementation used by tag().

        parent, when given, is the changeset whose .hgtags the new entry
        is appended to (rawcommit-style); None means use the working
        directory and dirstate.
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            # make sure the tags file itself is tracked
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
228 224
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to commit over uncommitted .hgtags edits; status()[:5]
        # covers modified, added, removed, deleted and unknown files
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
254 250
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}

        def readtags(lines, fn):
            # parse one tags file (UTF-8 lines of "hexnode tagname") and
            # merge its entries into globaltags, ranking by supersession
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates nodes this tag previously pointed at
                # (earlier lines in the same file are superseded)
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k,nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if bn != an and an in bh and \
                   (bn not in ah or len(bh) > len(ah)):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx where possible to share parsing
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
333 329
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) triples for heads carrying .hgtags,
        oldest head first; heads whose .hgtags revision is repeated by a
        later head are dropped."""
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags revision already queued for an older head
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
351 347
352 348 def tagslist(self):
353 349 '''return a list of tags ordered by revision'''
354 350 l = []
355 351 for t, n in self.tags().items():
356 352 try:
357 353 r = self.changelog.rev(n)
358 354 except:
359 355 r = -2 # sort to the beginning of the list if unknown
360 356 l.append((r, t, n))
361 357 l.sort()
362 358 return [(t, n) for r, t, n in l]
363 359
364 360 def nodetags(self, node):
365 361 '''return the tags associated with a node'''
366 362 if not self.nodetagscache:
367 363 self.nodetagscache = {}
368 364 for t, n in self.tags().items():
369 365 self.nodetagscache.setdefault(n, []).append(t)
370 366 return self.nodetagscache.get(node, [])
371 367
    def _branchtags(self):
        """Return the branch -> tip node map, refreshing the on-disk
        branch.cache if it is behind the changelog tip."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is stale: scan the new revisions and rewrite it
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        """Return branch tags in the local charset, cached on self."""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        """Parse branch.cache; return (branchmap, tipnode, tiprev).

        Any parse error or stale tip line invalidates the whole cache."""
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        # best-effort: failing to persist the cache is never fatal
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
432 428
433 429 def _updatebranchcache(self, partial, start, end):
434 430 for r in xrange(start, end):
435 431 c = self.changectx(r)
436 432 b = c.branch()
437 433 partial[b] = c.node()
438 434
    def lookup(self, key):
        """Resolve key to a binary changelog node.

        key may be '.', 'null', a rev number, a full hash, a tag, a
        branch name, or an unambiguous hash prefix (tried in that
        order).  Raises repo.RepoError when nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
460 456
    def dev(self):
        """Return the device number of the .hg directory."""
        return os.lstat(self.path).st_dev

    def local(self):
        """This is a local (not remote) repository."""
        return True

    def join(self, f):
        """Join f onto the .hg directory path."""
        return os.path.join(self.path, f)

    def sjoin(self, f):
        """Join f onto the store path, applying store filename encoding."""
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        """Join f onto the working directory root."""
        return os.path.join(self.root, f)
476 472
477 473 def file(self, f):
478 474 if f[0] == '/':
479 475 f = f[1:]
480 476 return filelog.filelog(self.sopener, f)
481 477
    def changectx(self, changeid=None):
        """Return the change context for changeid (default: tip/working)."""
        return context.changectx(self, changeid)

    def workingctx(self):
        """Return a context for the working directory."""
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # omit a null second parent
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        """Return the repo-relative current working directory."""
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        """Return f rendered relative to cwd for display."""
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open f from the working directory."""
        return self.wopener(f, mode)

    def _link(self, f):
        """True if f is a symlink in the working directory."""
        return os.path.islink(self.wjoin(f))
517 513
    def _filter(self, filter, filename, data):
        """Pass data through the first configured filter command whose
        pattern matches filename.

        filter names a config section ("encode" or "decode" as used by
        wread/wwrite); each entry maps a file pattern to a command."""
        if filter not in self.filterpats:
            # compile and cache the (matcher, command) list per section
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        return data
533 529
534 530 def wread(self, filename):
535 531 if self._link(filename):
536 532 data = os.readlink(self.wjoin(filename))
537 533 else:
538 534 data = self.wopener(filename, 'r').read()
539 535 return self._filter("encode", filename, data)
540 536
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying
        'decode' filters and honoring flags ('l' symlink, 'x' exec)."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            # replace any existing file with a symlink to data
            f = self.wjoin(filename)
            try:
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            try:
                # remove a stale symlink before writing a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)

    def wwritedata(self, filename, data):
        """Apply 'decode' filters to data without writing anything."""
        return self._filter("decode", filename, data)
564 560
    def transaction(self):
        """Return a transaction, nesting inside one already running.

        Saves the dirstate to journal.dirstate first so rollback() can
        restore it; on close, journal files are renamed to undo files."""
        tr = self.transhandle
        if tr != None and tr.running():
            # nest within the in-progress transaction
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
584 580
    def recover(self):
        """Roll back an interrupted transaction; return True if one
        was found and undone."""
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # drop caches that may reference the removed revisions
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None, lock=None):
        """Undo the last committed transaction, restoring the dirstate
        saved alongside it.  Acquires locks unless supplied."""
        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both the revlog caches and the dirstate are now stale
            self.invalidate()
            self.dirstate.invalidate()
        else:
            self.ui.warn(_("no rollback information available\n"))
609 605
610 606 def invalidate(self):
611 607 for a in "changelog manifest".split():
612 608 if hasattr(self, a):
613 609 self.__delattr__(a)
614 610 self.tagscache = None
615 611 self.nodetagscache = None
616 612
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock; with wait, retry with a timeout
        (ui.timeout config, default 600s) instead of failing fast."""
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        """Lock the store; caches are invalidated on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait,
                            acquirefn=self.invalidate,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        """Lock the working directory; dirstate is written on release
        and invalidated on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.dirstate.invalidate,
                            desc=_('working directory of %s') % self.origroot)
642 638
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for fn (or the existing parent node
        if the file is unchanged); appends fn to changelist when a new
        revision is actually added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
702 698
703 699 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
704 700 if p1 is None:
705 701 p1, p2 = self.dirstate.parents()
706 702 return self.commit(files=files, text=text, user=user, date=date,
707 703 p1=p1, p2=p2, wlock=wlock, extra=extra)
708 704
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node, or None if nothing
        was committed (no changes, or the user supplied an empty message).

        With p1 set (the rawcommit path) the given files are committed
        against the explicit parents; otherwise the dirstate supplies the
        parents and the list of modified/added/removed files.  Fires the
        precommit, pretxncommit and commit hooks.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # commit only the named files, classified by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # no file list: commit everything status reports as changed
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if we're committing on top of it
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is legal UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                new_exec = is_exec(f)
                new_link = is_link(f)
                if not changed or changed[-1] != f:
                    # mention the file in the changelog if some flag changed,
                    # even if there was no content change.
                    old_exec = m1.execf(f)
                    old_link = m1.linkf(f)
                    if old_exec != new_exec or old_link != new_link:
                        changed.append(f)
                        m1.set(f, new_exec, new_link)
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit path: an unreadable file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the commit-message template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty result aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
868 864
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed revision: files come from its manifest
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # deleting during iteration is safe only because
                        # we break out of the loop immediately afterwards
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but never matched a
            # manifest entry: either badmatch it or warn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            # no node: defer to the dirstate walker for the working dir
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
910 906
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, getnode):
            # compare working copy contents against the stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of `node` restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # set when we acquire the wlock ourselves below
        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            if list_clean:
                                clean.append(f)
                            # opportunistically record the file as clean in
                            # the dirstate to speed up future status calls;
                            # best-effort only, so a failed non-blocking
                            # lock attempt is silently ignored
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # mf2[fn] == "" marks a working-dir pseudo entry whose
                    # content must be compared directly via fcmp
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # what remains in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1020 1016
1021 1017 def add(self, list, wlock=None):
1022 1018 if not wlock:
1023 1019 wlock = self.wlock()
1024 1020 for f in list:
1025 1021 p = self.wjoin(f)
1026 1022 try:
1027 1023 st = os.lstat(p)
1028 1024 except:
1029 1025 self.ui.warn(_("%s does not exist!\n") % f)
1030 1026 continue
1031 1027 if st.st_size > 10000000:
1032 1028 self.ui.warn(_("%s: files over 10MB may cause memory and"
1033 1029 " performance problems\n"
1034 1030 "(use 'hg revert %s' to unadd the file)\n")
1035 1031 % (f, f))
1036 1032 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1037 1033 self.ui.warn(_("%s not added: only files and symlinks "
1038 1034 "supported currently\n") % f)
1039 1035 elif self.dirstate.state(f) in 'an':
1040 1036 self.ui.warn(_("%s already tracked!\n") % f)
1041 1037 else:
1042 1038 self.dirstate.update([f], "a")
1043 1039
1044 1040 def forget(self, list, wlock=None):
1045 1041 if not wlock:
1046 1042 wlock = self.wlock()
1047 1043 for f in list:
1048 1044 if self.dirstate.state(f) not in 'ai':
1049 1045 self.ui.warn(_("%s not added!\n") % f)
1050 1046 else:
1051 1047 self.dirstate.forget([f])
1052 1048
1053 1049 def remove(self, list, unlink=False, wlock=None):
1054 1050 if unlink:
1055 1051 for f in list:
1056 1052 try:
1057 1053 util.unlink(self.wjoin(f))
1058 1054 except OSError, inst:
1059 1055 if inst.errno != errno.ENOENT:
1060 1056 raise
1061 1057 if not wlock:
1062 1058 wlock = self.wlock()
1063 1059 for f in list:
1064 1060 if unlink and os.path.exists(self.wjoin(f)):
1065 1061 self.ui.warn(_("%s still exists!\n") % f)
1066 1062 elif self.dirstate.state(f) == 'a':
1067 1063 self.dirstate.forget([f])
1068 1064 elif f not in self.dirstate:
1069 1065 self.ui.warn(_("%s not tracked!\n") % f)
1070 1066 else:
1071 1067 self.dirstate.update([f], "r")
1072 1068
1073 1069 def undelete(self, list, wlock=None):
1074 1070 p = self.dirstate.parents()[0]
1075 1071 mn = self.changelog.read(p)[0]
1076 1072 m = self.manifest.read(mn)
1077 1073 if not wlock:
1078 1074 wlock = self.wlock()
1079 1075 for f in list:
1080 1076 if self.dirstate.state(f) not in "r":
1081 1077 self.ui.warn("%s not removed!\n" % f)
1082 1078 else:
1083 1079 t = self.file(f).read(m[f])
1084 1080 self.wwrite(f, t, m.flags(f))
1085 1081 self.dirstate.update([f], "n")
1086 1082
1087 1083 def copy(self, source, dest, wlock=None):
1088 1084 p = self.wjoin(dest)
1089 1085 if not (os.path.exists(p) or os.path.islink(p)):
1090 1086 self.ui.warn(_("%s does not exist!\n") % dest)
1091 1087 elif not (os.path.isfile(p) or os.path.islink(p)):
1092 1088 self.ui.warn(_("copy failed: %s is not a file or a "
1093 1089 "symbolic link\n") % dest)
1094 1090 else:
1095 1091 if not wlock:
1096 1092 wlock = self.wlock()
1097 1093 if self.dirstate.state(dest) == '?':
1098 1094 self.dirstate.update([dest], "a")
1099 1095 self.dirstate.copy(source, dest)
1100 1096
1101 1097 def heads(self, start=None):
1102 1098 heads = self.changelog.heads(start)
1103 1099 # sort the output in rev descending order
1104 1100 heads = [(-self.changelog.rev(h), h) for h in heads]
1105 1101 heads.sort()
1106 1102 return [n for (r, n) in heads]
1107 1103
1108 1104 def branches(self, nodes):
1109 1105 if not nodes:
1110 1106 nodes = [self.changelog.tip()]
1111 1107 b = []
1112 1108 for n in nodes:
1113 1109 t = n
1114 1110 while 1:
1115 1111 p = self.changelog.parents(n)
1116 1112 if p[1] != nullid or p[0] == nullid:
1117 1113 b.append((t, n, p[0], p[1]))
1118 1114 break
1119 1115 n = p[0]
1120 1116 return b
1121 1117
1122 1118 def between(self, pairs):
1123 1119 r = []
1124 1120
1125 1121 for top, bottom in pairs:
1126 1122 n, l, i = top, [], 0
1127 1123 f = 1
1128 1124
1129 1125 while n != bottom:
1130 1126 p = self.changelog.parents(n)[0]
1131 1127 if i == f:
1132 1128 l.append(n)
1133 1129 f = f * 2
1134 1130 n = p
1135 1131 i += 1
1136 1132
1137 1133 r.append(l)
1138 1134
1139 1135 return r
1140 1136
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # fetch branch info for unknown parents, ten at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short(f[:4]) truncates the binary node to 4
                # bytes before hexlifying — looks like it was meant to be
                # short(f); worth confirming upstream
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1281 1277
1282 1278 def findoutgoing(self, remote, base=None, heads=None, force=False):
1283 1279 """Return list of nodes that are roots of subsets not in remote
1284 1280
1285 1281 If base dict is specified, assume that these nodes and their parents
1286 1282 exist on the remote side.
1287 1283 If a list of heads is specified, return only nodes which are heads
1288 1284 or ancestors of these heads, and return a second element which
1289 1285 contains all remote heads which get new children.
1290 1286 """
1291 1287 if base == None:
1292 1288 base = {}
1293 1289 self.findincoming(remote, base, heads, force=force)
1294 1290
1295 1291 self.ui.debug(_("common changesets up to ")
1296 1292 + " ".join(map(short, base.keys())) + "\n")
1297 1293
1298 1294 remain = dict.fromkeys(self.changelog.nodemap)
1299 1295
1300 1296 # prune everything remote has from the tree
1301 1297 del remain[nullid]
1302 1298 remove = base.keys()
1303 1299 while remove:
1304 1300 n = remove.pop(0)
1305 1301 if n in remain:
1306 1302 del remain[n]
1307 1303 for p in self.changelog.parents(n):
1308 1304 remove.append(p)
1309 1305
1310 1306 # find every node whose parents have been pruned
1311 1307 subset = []
1312 1308 # find every remote head that will get new children
1313 1309 updated_heads = {}
1314 1310 for n in remain:
1315 1311 p1, p2 = self.changelog.parents(n)
1316 1312 if p1 not in remain and p2 not in remain:
1317 1313 subset.append(n)
1318 1314 if heads:
1319 1315 if p1 in heads:
1320 1316 updated_heads[p1] = True
1321 1317 if p2 in heads:
1322 1318 updated_heads[p2] = True
1323 1319
1324 1320 # this is the set of all roots we have to push
1325 1321 if heads:
1326 1322 return subset, updated_heads.keys()
1327 1323 else:
1328 1324 return subset
1329 1325
1330 1326 def pull(self, remote, heads=None, force=False, lock=None):
1331 1327 mylock = False
1332 1328 if not lock:
1333 1329 lock = self.lock()
1334 1330 mylock = True
1335 1331
1336 1332 try:
1337 1333 fetch = self.findincoming(remote, force=force)
1338 1334 if fetch == [nullid]:
1339 1335 self.ui.status(_("requesting all changes\n"))
1340 1336
1341 1337 if not fetch:
1342 1338 self.ui.status(_("no changes found\n"))
1343 1339 return 0
1344 1340
1345 1341 if heads is None:
1346 1342 cg = remote.changegroup(fetch, 'pull')
1347 1343 else:
1348 1344 if 'changegroupsubset' not in remote.capabilities:
1349 1345 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1350 1346 cg = remote.changegroupsubset(fetch, heads, 'pull')
1351 1347 return self.addchangegroup(cg, 'pull', remote.url())
1352 1348 finally:
1353 1349 if mylock:
1354 1350 lock.release()
1355 1351
1356 1352 def push(self, remote, force=False, revs=None):
1357 1353 # there are two ways to push to remote repo:
1358 1354 #
1359 1355 # addchangegroup assumes local user can lock remote
1360 1356 # repo (local filesystem, old ssh servers).
1361 1357 #
1362 1358 # unbundle assumes local user cannot lock remote repo (new ssh
1363 1359 # servers, http servers).
1364 1360
1365 1361 if remote.capable('unbundle'):
1366 1362 return self.push_unbundle(remote, force, revs)
1367 1363 return self.push_addchangegroup(remote, force, revs)
1368 1364
    def prepush(self, remote, force, revs):
        """Work out what must be pushed to remote.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo can never create extra heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push head set and compare sizes
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1424 1420
1425 1421 def push_addchangegroup(self, remote, force, revs):
1426 1422 lock = remote.lock()
1427 1423
1428 1424 ret = self.prepush(remote, force, revs)
1429 1425 if ret[0] is not None:
1430 1426 cg, remote_heads = ret
1431 1427 return remote.addchangegroup(cg, 'push', self.url())
1432 1428 return ret[1]
1433 1429
1434 1430 def push_unbundle(self, remote, force, revs):
1435 1431 # local repo finds heads on server, finds out what revs it
1436 1432 # must push. once revs transferred, if server finds it has
1437 1433 # different heads (someone else won commit/push race), server
1438 1434 # aborts.
1439 1435
1440 1436 ret = self.prepush(remote, force, revs)
1441 1437 if ret[0] is not None:
1442 1438 cg, remote_heads = ret
1443 1439 if force: remote_heads = ['force']
1444 1440 return remote.unbundle(cg, remote_heads, 'push')
1445 1441 return ret[1]
1446 1442
1447 1443 def changegroupinfo(self, nodes):
1448 1444 self.ui.note(_("%d changesets found\n") % len(nodes))
1449 1445 if self.ui.debugflag:
1450 1446 self.ui.debug(_("List of changesets:\n"))
1451 1447 for node in nodes:
1452 1448 self.ui.debug("%s\n" % hex(node))
1453 1449
1454 1450 def changegroupsubset(self, bases, heads, source):
1455 1451 """This function generates a changegroup consisting of all the nodes
1456 1452 that are descendents of any of the bases, and ancestors of any of
1457 1453 the heads.
1458 1454
1459 1455 It is fairly complex as determining which filenodes and which
1460 1456 manifest nodes need to be included for the changeset to be complete
1461 1457 is non-trivial.
1462 1458
1463 1459 Another wrinkle is doing the reverse, figuring out which changeset in
1464 1460 the changegroup a particular filenode or manifestnode belongs to."""
1465 1461
1466 1462 self.hook('preoutgoing', throw=True, source=source)
1467 1463
1468 1464 # Set up some initial variables
1469 1465 # Make it easy to refer to self.changelog
1470 1466 cl = self.changelog
1471 1467 # msng is short for missing - compute the list of changesets in this
1472 1468 # changegroup.
1473 1469 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1474 1470 self.changegroupinfo(msng_cl_lst)
1475 1471 # Some bases may turn out to be superfluous, and some heads may be
1476 1472 # too. nodesbetween will return the minimal set of bases and heads
1477 1473 # necessary to re-create the changegroup.
1478 1474
1479 1475 # Known heads are the list of heads that it is assumed the recipient
1480 1476 # of this changegroup will know about.
1481 1477 knownheads = {}
1482 1478 # We assume that all parents of bases are known heads.
1483 1479 for n in bases:
1484 1480 for p in cl.parents(n):
1485 1481 if p != nullid:
1486 1482 knownheads[p] = 1
1487 1483 knownheads = knownheads.keys()
1488 1484 if knownheads:
1489 1485 # Now that we know what heads are known, we can compute which
1490 1486 # changesets are known. The recipient must know about all
1491 1487 # changesets required to reach the known heads from the null
1492 1488 # changeset.
1493 1489 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1494 1490 junk = None
1495 1491 # Transform the list into an ersatz set.
1496 1492 has_cl_set = dict.fromkeys(has_cl_set)
1497 1493 else:
1498 1494 # If there were no known heads, the recipient cannot be assumed to
1499 1495 # know about any changesets.
1500 1496 has_cl_set = {}
1501 1497
1502 1498 # Make it easy to refer to self.manifest
1503 1499 mnfst = self.manifest
1504 1500 # We don't know which manifests are missing yet
1505 1501 msng_mnfst_set = {}
1506 1502 # Nor do we know which filenodes are missing.
1507 1503 msng_filenode_set = {}
1508 1504
1509 1505 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1510 1506 junk = None
1511 1507
1512 1508 # A changeset always belongs to itself, so the changenode lookup
1513 1509 # function for a changenode is identity.
1514 1510 def identity(x):
1515 1511 return x
1516 1512
1517 1513 # A function generating function. Sets up an environment for the
1518 1514 # inner function.
1519 1515 def cmp_by_rev_func(revlog):
1520 1516 # Compare two nodes by their revision number in the environment's
1521 1517 # revision history. Since the revision number both represents the
1522 1518 # most efficient order to read the nodes in, and represents a
1523 1519 # topological sorting of the nodes, this function is often useful.
1524 1520 def cmp_by_rev(a, b):
1525 1521 return cmp(revlog.rev(a), revlog.rev(b))
1526 1522 return cmp_by_rev
1527 1523
1528 1524 # If we determine that a particular file or manifest node must be a
1529 1525 # node that the recipient of the changegroup will already have, we can
1530 1526 # also assume the recipient will have all the parents. This function
1531 1527 # prunes them from the set of missing nodes.
1532 1528 def prune_parents(revlog, hasset, msngset):
1533 1529 haslst = hasset.keys()
1534 1530 haslst.sort(cmp_by_rev_func(revlog))
1535 1531 for node in haslst:
1536 1532 parentlst = [p for p in revlog.parents(node) if p != nullid]
1537 1533 while parentlst:
1538 1534 n = parentlst.pop()
1539 1535 if n not in hasset:
1540 1536 hasset[n] = 1
1541 1537 p = [p for p in revlog.parents(n) if p != nullid]
1542 1538 parentlst.extend(p)
1543 1539 for n in hasset:
1544 1540 msngset.pop(n, None)
1545 1541
1546 1542 # This is a function generating function used to set up an environment
1547 1543 # for the inner function to execute in.
1548 1544 def manifest_and_file_collector(changedfileset):
1549 1545 # This is an information gathering function that gathers
1550 1546 # information from each changeset node that goes out as part of
1551 1547 # the changegroup. The information gathered is a list of which
1552 1548 # manifest nodes are potentially required (the recipient may
1553 1549 # already have them) and total list of all files which were
1554 1550 # changed in any changeset in the changegroup.
1555 1551 #
1556 1552 # We also remember the first changenode we saw any manifest
1557 1553 # referenced by so we can later determine which changenode 'owns'
1558 1554 # the manifest.
1559 1555 def collect_manifests_and_files(clnode):
1560 1556 c = cl.read(clnode)
1561 1557 for f in c[3]:
1562 1558 # This is to make sure we only have one instance of each
1563 1559 # filename string for each filename.
1564 1560 changedfileset.setdefault(f, f)
1565 1561 msng_mnfst_set.setdefault(c[0], clnode)
1566 1562 return collect_manifests_and_files
1567 1563
1568 1564 # Figure out which manifest nodes (of the ones we think might be part
1569 1565 # of the changegroup) the recipient must know about and remove them
1570 1566 # from the changegroup.
1571 1567 def prune_manifests():
1572 1568 has_mnfst_set = {}
1573 1569 for n in msng_mnfst_set:
1574 1570 # If a 'missing' manifest thinks it belongs to a changenode
1575 1571 # the recipient is assumed to have, obviously the recipient
1576 1572 # must have that manifest.
1577 1573 linknode = cl.node(mnfst.linkrev(n))
1578 1574 if linknode in has_cl_set:
1579 1575 has_mnfst_set[n] = 1
1580 1576 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1581 1577
        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            # msng_mnfst_set maps manifest node -> owning changelog node.
            return msng_mnfst_set[mnfstnode]
1586 1582
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # One-element list so the nested function can rebind it
            # (Python 2 has no 'nonlocal'); tracks the manifest revision
            # we expect to see next.
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1636 1632
1637 1633 # We have a list of filenodes we think we need for a file, lets remove
1638 1634 # all those we now the recipient must have.
1639 1635 def prune_filenodes(f, filerevlog):
1640 1636 msngset = msng_filenode_set[f]
1641 1637 hasset = {}
1642 1638 # If a 'missing' filenode thinks it belongs to a changenode we
1643 1639 # assume the recipient must have, then the recipient must have
1644 1640 # that filenode.
1645 1641 for n in msngset:
1646 1642 clnode = cl.node(filerevlog.linkrev(n))
1647 1643 if clnode in has_cl_set:
1648 1644 hasset[n] = 1
1649 1645 prune_parents(filerevlog, hasset, msngset)
1650 1646
1651 1647 # A function generator function that sets up the a context for the
1652 1648 # inner function.
1653 1649 def lookup_filenode_link_func(fname):
1654 1650 msngset = msng_filenode_set[fname]
1655 1651 # Lookup the changenode the filenode belongs to.
1656 1652 def lookup_filenode_link(fnode):
1657 1653 return msngset[fnode]
1658 1654 return lookup_filenode_link
1659 1655
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # Generator yielding the raw changegroup chunks in wire order:
            # the changelog group, then the manifest group, then one group
            # per changed file, terminated by a close chunk.
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1720 1716
        # Fire the 'outgoing' hook for the first outgoing changeset (only
        # if anything is actually being sent).
        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        # Wrap the chunk generator in a buffer that presents a file-like
        # read() interface to the transport layer.
        return util.chunkbuffer(gengroup())
1725 1721
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes the recipient already has (the group contains
        everything between those and our heads).
        source: opaque tag passed through to the pre/post hooks.
        Returns a util.chunkbuffer streaming the raw changegroup.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All nodes the recipient is missing, plus the set of their
        # changelog revision numbers for fast linkrev membership tests.
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # lookup callback: each node is its own linkrev target here
            return x

        def gennodelst(revlog):
            # yield the nodes of revlog whose linkrev is in the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # record every file touched by each outgoing changeset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of revlog back to its owning changelog node
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can test emptiness before emitting
                # the filename chunk
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1792 1788
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: file-like object the changegroup chunks are read from.
        srctype/url: passed through to the hooks describing where the
        data came from.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # progress/debug callback; returns the linkrev the next
            # changeset will get
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # manifest/filelog linkrevs point at changelog revisions
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        # cor/cnr: changelog tip revision before and after the add
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file is a filename chunk followed by its revision group;
            # an empty chunk terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may throw to veto; the transaction is only
            # committed after it approves
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1889 1885
1890 1886
1891 1887 def stream_in(self, remote):
1892 1888 fp = remote.stream_out()
1893 1889 l = fp.readline()
1894 1890 try:
1895 1891 resp = int(l)
1896 1892 except ValueError:
1897 1893 raise util.UnexpectedOutput(
1898 1894 _('Unexpected response from remote server:'), l)
1899 1895 if resp == 1:
1900 1896 raise util.Abort(_('operation forbidden by server'))
1901 1897 elif resp == 2:
1902 1898 raise util.Abort(_('locking the remote repository failed'))
1903 1899 elif resp != 0:
1904 1900 raise util.Abort(_('the server sent an unknown error code'))
1905 1901 self.ui.status(_('streaming all changes\n'))
1906 1902 l = fp.readline()
1907 1903 try:
1908 1904 total_files, total_bytes = map(int, l.split(' ', 1))
1909 1905 except ValueError, TypeError:
1910 1906 raise util.UnexpectedOutput(
1911 1907 _('Unexpected response from remote server:'), l)
1912 1908 self.ui.status(_('%d files to transfer, %s of data\n') %
1913 1909 (total_files, util.bytecount(total_bytes)))
1914 1910 start = time.time()
1915 1911 for i in xrange(total_files):
1916 1912 # XXX doesn't support '\n' or '\r' in filenames
1917 1913 l = fp.readline()
1918 1914 try:
1919 1915 name, size = l.split('\0', 1)
1920 1916 size = int(size)
1921 1917 except ValueError, TypeError:
1922 1918 raise util.UnexpectedOutput(
1923 1919 _('Unexpected response from remote server:'), l)
1924 1920 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1925 1921 ofp = self.sopener(name, 'w')
1926 1922 for chunk in util.filechunkiter(fp, limit=size):
1927 1923 ofp.write(chunk)
1928 1924 ofp.close()
1929 1925 elapsed = time.time() - start
1930 1926 if elapsed <= 0:
1931 1927 elapsed = 0.001
1932 1928 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1933 1929 (util.bytecount(total_bytes), elapsed,
1934 1930 util.bytecount(total_bytes / elapsed)))
1935 1931 self.invalidate()
1936 1932 return len(self.heads()) + 1
1937 1933
1938 1934 def clone(self, remote, heads=[], stream=False):
1939 1935 '''clone remote repository.
1940 1936
1941 1937 keyword arguments:
1942 1938 heads: list of revs to clone (forces use of pull)
1943 1939 stream: use streaming clone if possible'''
1944 1940
1945 1941 # now, all clients that can request uncompressed clones can
1946 1942 # read repo formats supported by all servers that can serve
1947 1943 # them.
1948 1944
1949 1945 # if revlog format changes, client will have to check version
1950 1946 # and format flags on "stream" capability, and use
1951 1947 # uncompressed only if compatible.
1952 1948
1953 1949 if stream and not heads and remote.capable('stream'):
1954 1950 return self.stream_in(remote)
1955 1951 return self.pull(remote, heads)
1956 1952
# used to avoid circular references so destructors work
def aftertrans(files):
    # Snapshot the (src, dest) pairs as plain tuples so the returned
    # closure holds no references back to the caller's structures.
    pending = [tuple(pair) for pair in files]
    def a():
        # carry out every queued rename
        for src, dest in pending:
            util.rename(src, dest)
    return a
1964 1960
def instance(ui, path, create):
    """Open (or create, if *create* is true) a localrepository at *path*,
    stripping an optional 'file:' scheme prefix first."""
    return localrepository(ui, util.drop_scheme('file', path), create)
1967 1963
def islocal(path):
    """Return True: repositories handled by this module are always local."""
    return True
@@ -1,21 +1,21
1 1 uisetup called
2 ui.parentui is None
2 ui.parentui isnot None
3 3 reposetup called for a
4 4 ui == repo.ui
5 5 Foo
6 6 uisetup called
7 7 ui.parentui is None
8 8 reposetup called for a
9 9 ui == repo.ui
10 10 reposetup called for b
11 11 ui == repo.ui
12 12 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
13 13 uisetup called
14 14 ui.parentui is None
15 15 Bar
16 16 % module/__init__.py-style
17 17 uisetup called
18 ui.parentui is None
18 ui.parentui isnot None
19 19 reposetup called for a
20 20 ui == repo.ui
21 21 Foo
General Comments 0
You need to be logged in to leave comments. Login now