##// END OF EJS Templates
Corrected the "waiting for lock on repository FOO held by BAR" message.
Thomas Arendsen Hein -
r3688:d92dad35 default
parent child Browse files
Show More
@@ -1,1896 +1,1896
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
    def __del__(self):
        # drop the transaction reference on teardown so a pending
        # transaction's back-reference cycle does not keep it alive
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at *path*.

        If path is None, search upward from the current directory for a
        .hg directory.  Raises repo.RepoError when no repository is
        found, or when create is set and one already exists.
        """
        repo.repository.__init__(self)
        if not path:
            # locate the repository root by walking up to filesystem root
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.realpath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener/sopener open files under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.sopener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without a per-repo hgrc is valid
            pass

        # determine revlog version and flags from the [revlog] config section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags(), branchtags(), nodetags(),
        # wread() and wwrite()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
    def url(self):
        """Return this repository's URL ('file:' + repository root)."""
        return 'file:' + self.root
93 93
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name matches *name*.

        args are passed to python hooks as keyword arguments and to
        shell hooks as HG_* environment variables.  When throw is true,
        a failing hook raises util.Abort; otherwise failures only emit
        warnings.  Returns the combined "failed" status of the hooks.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path from the module down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args are exposed as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # run "name" and "name.suffix" entries, in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
174 174
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags live in .hg/localtags and need no commit
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to clobber a modified .hgtags in the working directory
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
217 217
218 218 def tags(self):
219 219 '''return a mapping of tag to node'''
220 220 if not self.tagscache:
221 221 self.tagscache = {}
222 222
223 223 def parsetag(line, context):
224 224 if not line:
225 225 return
226 226 s = l.split(" ", 1)
227 227 if len(s) != 2:
228 228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 229 return
230 230 node, key = s
231 231 key = key.strip()
232 232 try:
233 233 bin_n = bin(node)
234 234 except TypeError:
235 235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 236 (context, node))
237 237 return
238 238 if bin_n not in self.changelog.nodemap:
239 239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 240 (context, key))
241 241 return
242 242 self.tagscache[key] = bin_n
243 243
244 244 # read the tags file from each head, ending with the tip,
245 245 # and add each tag found to the map, with "newer" ones
246 246 # taking precedence
247 247 f = None
248 248 for rev, node, fnode in self._hgtagsnodes():
249 249 f = (f and f.filectx(fnode) or
250 250 self.filectx('.hgtags', fileid=fnode))
251 251 count = 0
252 252 for l in f.data().splitlines():
253 253 count += 1
254 254 parsetag(l, _("%s, line %d") % (str(f), count))
255 255
256 256 try:
257 257 f = self.opener("localtags")
258 258 count = 0
259 259 for l in f:
260 260 count += 1
261 261 parsetag(l, _("localtags, line %d") % count)
262 262 except IOError:
263 263 pass
264 264
265 265 self.tagscache['tip'] = self.changelog.tip()
266 266
267 267 return self.tagscache
268 268
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for each head carrying a .hgtags file.

        Heads are visited in ascending revision order; when several
        heads share the same .hgtags filenode, only the newest entry is
        kept.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate filenode: null out the older occurrence
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        # filter out the entries nulled above
        return [item for item in ret if item]
286 286
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                # NOTE(review): bare except -- presumably guards against
                # nodes missing from the changelog; consider narrowing
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]
298 298
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            # invert the tag -> node map once and cache the result
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])
306 306
    def branchtags(self):
        """Return a {branch name: node} map, computed and cached on
        first use from the on-disk branch cache."""
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        partial, last, lrev = self._readbranchcache()

        # bring the cache up to date with revisions added since lrev
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        self.branchcache = partial
        return self.branchcache
322 322
    def _readbranchcache(self):
        """Load branches.cache from .hg.

        Returns (partial branch map, cached-tip node, cached-tip rev);
        the tip pair is (nullid, nullrev) when the cache is missing or
        fails its sanity check.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # the first line records the tip the cache was valid for
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                for l in lines:
                    if not l: continue
                    node, label = l.rstrip().split(" ", 1)
                    partial[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, nullrev
        except IOError:
            # no cache file yet: start from scratch
            last, lrev = nullid, nullrev
        return partial, last, lrev
342 342
343 343 def _writebranchcache(self, branches, tip, tiprev):
344 344 try:
345 345 f = self.opener("branches.cache", "w")
346 346 f.write("%s %s\n" % (hex(tip), tiprev))
347 347 for label, node in branches.iteritems():
348 348 f.write("%s %s\n" % (hex(node), label))
349 349 except IOError:
350 350 pass
351 351
    def _updatebranchcache(self, partial, start, end):
        # record, for every named branch, the newest node seen in
        # revisions [start, end); later revisions overwrite earlier ones
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            if b:
                partial[b] = c.node()
358 358
    def lookup(self, key):
        """Resolve *key* to a changelog node.

        Tries, in order: '.' (first dirstate parent), exact changelog
        match, tag name, branch name, and node-prefix match.  Raises
        repo.RepoError when nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
375 375
    def dev(self):
        """Return the device number of the .hg directory."""
        return os.lstat(self.path).st_dev
378 378
    def local(self):
        # this is a local (filesystem) repository, not a remote proxy
        return True
381 381
    def join(self, f):
        """Join f with the repository's .hg directory."""
        return os.path.join(self.path, f)
384 384
    def sjoin(self, f):
        """Like join(); used for store-relative paths (same directory here)."""
        return os.path.join(self.path, f)
387 387
    def wjoin(self, f):
        """Join f with the working directory root."""
        return os.path.join(self.root, f)
390 390
    def file(self, f):
        """Return the filelog for tracked file f (a leading '/' is stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)
395 395
    def changectx(self, changeid=None):
        """Return a context.changectx for *changeid*."""
        return context.changectx(self, changeid)
398 398
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
401 401
    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # a single-parent changeset yields a one-element list
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]
414 414
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
419 419
    def getcwd(self):
        """Return the dirstate's notion of the current working directory."""
        return self.dirstate.getcwd()
422 422
    def wfile(self, f, mode='r'):
        """Open file f relative to the working directory."""
        return self.wopener(f, mode)
425 425
    def wread(self, filename):
        """Read *filename* from the working dir, applying [encode] filters."""
        if self.encodepats == None:
            # compile the [encode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        return data
443 443
    def wwrite(self, filename, data, fd=None):
        """Write *data* to *filename* in the working dir, applying
        [decode] filters; write through *fd* when one is given."""
        if self.decodepats == None:
            # compile the [decode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
461 461
    def transaction(self):
        """Return a new transaction, or a nested one if one is running."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
479 479
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
490 490
    def rollback(self, wlock=None):
        """Undo the last completed transaction, restoring the saved
        dirstate alongside it."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
503 503
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
506 506
    def reload(self):
        # re-read changelog/manifest and invalidate the tag caches
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
512 512
513 513 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
514 514 desc=None):
515 515 try:
516 516 l = lock.lock(lockname, 0, releasefn, desc=desc)
517 517 except lock.LockHeld, inst:
518 518 if not wait:
519 519 raise
520 self.ui.warn(_("waiting for lock on %s held by %s\n") %
521 (desc, inst.args[0]))
520 self.ui.warn(_("waiting for lock on %s held by %r\n") %
521 (desc, inst.locker))
522 522 # default to 600 seconds timeout
523 523 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
524 524 releasefn, desc=desc)
525 525 if acquirefn:
526 526 acquirefn()
527 527 return l
528 528
    def lock(self, wait=1):
        """Acquire the repository lock; reload metadata on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
532 532
    def wlock(self, wait=1):
        """Acquire the working directory lock; the dirstate is written
        on release and re-read on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
537 537
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node (or the existing one when the file
        is unchanged).  Appends fn to changelist when a new revision is
        actually written.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # record copy/rename source and revision in the file metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
577 577
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit *files* with explicit parents, bypassing dirstate
        status computation (parents default to the dirstate's)."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
583 583
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node.

        With p1 given (rawcommit path) the dirstate is bypassed and
        *files* is committed as-is; otherwise files/match select from
        the dirstate.  Returns None when there is nothing to commit or
        the commit message ends up empty.
        """
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # explicit file list: classify each by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
        else:
            branchname = ""

        if use_dirstate:
            # nothing to do when no files changed, no merge, and the
            # branch name is unchanged
            oldname = c1[5].get("branch", "")
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: an unreadable file is treated as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build an "HG:" template and hand it to the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip leading blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
718 718
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was never found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
752 752
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.
        """

        def fcmp(fn, mf):
            # true if the working copy of fn differs from its entry in mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # without the lock we skip the dirstate fixups below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
849 849
    def add(self, list, wlock=None):
        """Schedule the given files for addition, warning on missing,
        non-regular, or already-tracked entries."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
864 864
    def forget(self, list, wlock=None):
        """Unschedule pending adds ('a'/'i' dirstate states only)."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])
873 873
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink, delete them from the
        working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # an already-missing file is fine when unlinking
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
894 894
895 895 def undelete(self, list, wlock=None):
896 896 p = self.dirstate.parents()[0]
897 897 mn = self.changelog.read(p)[0]
898 898 m = self.manifest.read(mn)
899 899 if not wlock:
900 900 wlock = self.wlock()
901 901 for f in list:
902 902 if self.dirstate.state(f) not in "r":
903 903 self.ui.warn("%s not removed!\n" % f)
904 904 else:
905 905 t = self.file(f).read(m[f])
906 906 self.wwrite(f, t)
907 907 util.set_exec(self.wjoin(f), m.execf(f))
908 908 self.dirstate.update([f], "n")
909 909
    def copy(self, source, dest, wlock=None):
        """Record dest as a copy of source in the dirstate, adding dest
        if it is not yet tracked."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
922 922
    def heads(self, start=None):
        """Return changelog heads, ordered by descending revision number."""
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]
929 929
930 930 # branchlookup returns a dict giving a list of branches for
931 931 # each head. A branch is defined as the tag of a node or
932 932 # the branch of the node's parents. If a node has multiple
933 933 # branch tags, tags are eliminated if they are visible from other
934 934 # branch tags.
935 935 #
936 936 # So, for this graph: a->b->c->d->e
937 937 # \ /
938 938 # aa -----/
939 939 # a has tag 2.6.12
940 940 # d has tag 2.6.13
941 941 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
942 942 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
943 943 # from the list.
944 944 #
945 945 # It is possible that more than one head will have the same branch tag.
946 946 # callers need to check the result for multiple heads under the same
947 947 # branch tag if that is a problem for them (ie checkout of a specific
948 948 # branch).
949 949 #
950 950 # passing in a specific branch will limit the depth of the search
951 951 # through the parents. It won't limit the branches returned in the
952 952 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it.

        heads defaults to self.heads().  branch, if given, stops the
        walk below any node carrying that tag.  Returns a dict
        {headnode: [tagname, ...]}; heads with no visible tags are
        omitted.  (See the comment block above this method for the
        full definition of a "branch tag".)
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred walk down the second parent of a merge
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # every tag found above us can also see this tag
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once we hit the requested branch tag
                    if branch in tags:
                        continue
                seen[n] = 1
                # remember unseen merges so the second parent gets its own pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tag nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1035 1035
1036 1036 def branches(self, nodes):
1037 1037 if not nodes:
1038 1038 nodes = [self.changelog.tip()]
1039 1039 b = []
1040 1040 for n in nodes:
1041 1041 t = n
1042 1042 while 1:
1043 1043 p = self.changelog.parents(n)
1044 1044 if p[1] != nullid or p[0] == nullid:
1045 1045 b.append((t, n, p[0], p[1]))
1046 1046 break
1047 1047 n = p[0]
1048 1048 return b
1049 1049
1050 1050 def between(self, pairs):
1051 1051 r = []
1052 1052
1053 1053 for top, bottom in pairs:
1054 1054 n, l, i = top, [], 0
1055 1055 f = 1
1056 1056
1057 1057 while n != bottom:
1058 1058 p = self.changelog.parents(n)[0]
1059 1059 if i == f:
1060 1060 l.append(n)
1061 1061 f = f * 2
1062 1062 n = p
1063 1063 i += 1
1064 1064
1065 1065 r.append(l)
1066 1066
1067 1067 return r
1068 1068
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children exist in neither.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything non-null the remote has
            # is missing here
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes we have already asked (or will ask) the
        # remote about, so each is requested at most once
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next request round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # batch the branch requests, ten nodes at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1209 1209
1210 1210 def findoutgoing(self, remote, base=None, heads=None, force=False):
1211 1211 """Return list of nodes that are roots of subsets not in remote
1212 1212
1213 1213 If base dict is specified, assume that these nodes and their parents
1214 1214 exist on the remote side.
1215 1215 If a list of heads is specified, return only nodes which are heads
1216 1216 or ancestors of these heads, and return a second element which
1217 1217 contains all remote heads which get new children.
1218 1218 """
1219 1219 if base == None:
1220 1220 base = {}
1221 1221 self.findincoming(remote, base, heads, force=force)
1222 1222
1223 1223 self.ui.debug(_("common changesets up to ")
1224 1224 + " ".join(map(short, base.keys())) + "\n")
1225 1225
1226 1226 remain = dict.fromkeys(self.changelog.nodemap)
1227 1227
1228 1228 # prune everything remote has from the tree
1229 1229 del remain[nullid]
1230 1230 remove = base.keys()
1231 1231 while remove:
1232 1232 n = remove.pop(0)
1233 1233 if n in remain:
1234 1234 del remain[n]
1235 1235 for p in self.changelog.parents(n):
1236 1236 remove.append(p)
1237 1237
1238 1238 # find every node whose parents have been pruned
1239 1239 subset = []
1240 1240 # find every remote head that will get new children
1241 1241 updated_heads = {}
1242 1242 for n in remain:
1243 1243 p1, p2 = self.changelog.parents(n)
1244 1244 if p1 not in remain and p2 not in remain:
1245 1245 subset.append(n)
1246 1246 if heads:
1247 1247 if p1 in heads:
1248 1248 updated_heads[p1] = True
1249 1249 if p2 in heads:
1250 1250 updated_heads[p2] = True
1251 1251
1252 1252 # this is the set of all roots we have to push
1253 1253 if heads:
1254 1254 return subset, updated_heads.keys()
1255 1255 else:
1256 1256 return subset
1257 1257
1258 1258 def pull(self, remote, heads=None, force=False, lock=None):
1259 1259 mylock = False
1260 1260 if not lock:
1261 1261 lock = self.lock()
1262 1262 mylock = True
1263 1263
1264 1264 try:
1265 1265 fetch = self.findincoming(remote, force=force)
1266 1266 if fetch == [nullid]:
1267 1267 self.ui.status(_("requesting all changes\n"))
1268 1268
1269 1269 if not fetch:
1270 1270 self.ui.status(_("no changes found\n"))
1271 1271 return 0
1272 1272
1273 1273 if heads is None:
1274 1274 cg = remote.changegroup(fetch, 'pull')
1275 1275 else:
1276 1276 if 'changegroupsubset' not in remote.capabilities:
1277 1277 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1278 1278 cg = remote.changegroupsubset(fetch, heads, 'pull')
1279 1279 return self.addchangegroup(cg, 'pull', remote.url())
1280 1280 finally:
1281 1281 if mylock:
1282 1282 lock.release()
1283 1283
1284 1284 def push(self, remote, force=False, revs=None):
1285 1285 # there are two ways to push to remote repo:
1286 1286 #
1287 1287 # addchangegroup assumes local user can lock remote
1288 1288 # repo (local filesystem, old ssh servers).
1289 1289 #
1290 1290 # unbundle assumes local user cannot lock remote repo (new ssh
1291 1291 # servers, http servers).
1292 1292
1293 1293 if remote.capable('unbundle'):
1294 1294 return self.push_unbundle(remote, force, revs)
1295 1295 return self.push_addchangegroup(remote, force, revs)
1296 1296
    def prepush(self, remote, force, revs):
        """Build the changegroup for a push and vet it for safety.

        Returns (changegroup, remote_heads) ready to be sent, or
        (None, 1) when there is nothing to push or when the push would
        create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: pushing cannot add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head only
                        # if no outgoing head descends from it
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1352 1352
1353 1353 def push_addchangegroup(self, remote, force, revs):
1354 1354 lock = remote.lock()
1355 1355
1356 1356 ret = self.prepush(remote, force, revs)
1357 1357 if ret[0] is not None:
1358 1358 cg, remote_heads = ret
1359 1359 return remote.addchangegroup(cg, 'push', self.url())
1360 1360 return ret[1]
1361 1361
1362 1362 def push_unbundle(self, remote, force, revs):
1363 1363 # local repo finds heads on server, finds out what revs it
1364 1364 # must push. once revs transferred, if server finds it has
1365 1365 # different heads (someone else won commit/push race), server
1366 1366 # aborts.
1367 1367
1368 1368 ret = self.prepush(remote, force, revs)
1369 1369 if ret[0] is not None:
1370 1370 cg, remote_heads = ret
1371 1371 if force: remote_heads = ['force']
1372 1372 return remote.unbundle(cg, remote_heads, 'push')
1373 1373 return ret[1]
1374 1374
1375 1375 def changegroupinfo(self, nodes):
1376 1376 self.ui.note(_("%d changesets found\n") % len(nodes))
1377 1377 if self.ui.debugflag:
1378 1378 self.ui.debug(_("List of changesets:\n"))
1379 1379 for node in nodes:
1380 1380 self.ui.debug("%s\n" % hex(node))
1381 1381
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog node ids; source is an
        opaque tag passed through to the pre/post-outgoing hooks.
        Returns a util.chunkbuffer streaming the encoded changegroup.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1653 1653
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes is a list of changelog nodes the recipient already
        has; source is an opaque tag passed through to the outgoing
        hooks.  Returns a util.chunkbuffer streaming the encoded
        changegroup.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revision numbers of the outgoing changesets, as an ersatz set
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset's owning changenode is itself
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to its owning changeset node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1720 1720
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a changegroup stream; srctype (e.g. 'push', 'pull')
        and url describe its origin and are passed through to the
        changegroup-related hooks.
        """

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # always discard the appendfile scaffolding, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added changeset
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1818 1818
1819 1819
1820 1820 def stream_in(self, remote):
1821 1821 fp = remote.stream_out()
1822 1822 l = fp.readline()
1823 1823 try:
1824 1824 resp = int(l)
1825 1825 except ValueError:
1826 1826 raise util.UnexpectedOutput(
1827 1827 _('Unexpected response from remote server:'), l)
1828 1828 if resp == 1:
1829 1829 raise util.Abort(_('operation forbidden by server'))
1830 1830 elif resp == 2:
1831 1831 raise util.Abort(_('locking the remote repository failed'))
1832 1832 elif resp != 0:
1833 1833 raise util.Abort(_('the server sent an unknown error code'))
1834 1834 self.ui.status(_('streaming all changes\n'))
1835 1835 l = fp.readline()
1836 1836 try:
1837 1837 total_files, total_bytes = map(int, l.split(' ', 1))
1838 1838 except ValueError, TypeError:
1839 1839 raise util.UnexpectedOutput(
1840 1840 _('Unexpected response from remote server:'), l)
1841 1841 self.ui.status(_('%d files to transfer, %s of data\n') %
1842 1842 (total_files, util.bytecount(total_bytes)))
1843 1843 start = time.time()
1844 1844 for i in xrange(total_files):
1845 1845 l = fp.readline()
1846 1846 try:
1847 1847 name, size = l.split('\0', 1)
1848 1848 size = int(size)
1849 1849 except ValueError, TypeError:
1850 1850 raise util.UnexpectedOutput(
1851 1851 _('Unexpected response from remote server:'), l)
1852 1852 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1853 1853 ofp = self.sopener(name, 'w')
1854 1854 for chunk in util.filechunkiter(fp, limit=size):
1855 1855 ofp.write(chunk)
1856 1856 ofp.close()
1857 1857 elapsed = time.time() - start
1858 1858 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1859 1859 (util.bytecount(total_bytes), elapsed,
1860 1860 util.bytecount(total_bytes / elapsed)))
1861 1861 self.reload()
1862 1862 return len(self.heads()) + 1
1863 1863
1864 1864 def clone(self, remote, heads=[], stream=False):
1865 1865 '''clone remote repository.
1866 1866
1867 1867 keyword arguments:
1868 1868 heads: list of revs to clone (forces use of pull)
1869 1869 stream: use streaming clone if possible'''
1870 1870
1871 1871 # now, all clients that can request uncompressed clones can
1872 1872 # read repo formats supported by all servers that can serve
1873 1873 # them.
1874 1874
1875 1875 # if revlog format changes, client will have to check version
1876 1876 # and format flags on "stream" capability, and use
1877 1877 # uncompressed only if compatible.
1878 1878
1879 1879 if stream and not heads and remote.capable('stream'):
1880 1880 return self.stream_in(remote)
1881 1881 return self.pull(remote, heads)
1882 1882
1883 1883 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the transaction journal files
    under *base* to their "undo" names.

    A local copy of *base* is captured by the closure instead of any
    repository object, so no circular reference is created and
    destructors keep working.
    """
    basedir = base
    def renamefiles():
        for oldname, newname in (("journal", "undo"),
                                 ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(basedir, oldname),
                        os.path.join(basedir, newname))
    return renamefiles
1891 1891
def instance(ui, path, create):
    """Open (or create, when *create* is true) the local repository
    named by *path*, stripping any leading 'file:' scheme first."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
1894 1894
def islocal(path):
    """Report whether *path* names a local repository.

    Every repository reached through this module is file-backed, so
    this always holds; *path* is accepted only to satisfy the common
    repo-module interface.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now