Revert almost all of 5be434785317; add a test...
Alexis S. L. Carvalho
r3675:6990e499 default
@@ -0,0 +1,14 @@
1 #!/bin/sh
2
3 hg init dir
4 cd dir
5 echo bleh > bar
6 hg add bar
7 hg ci -m 'add bar'
8
9 hg cp bar foo
10 echo >> bar
11 hg ci -m 'cp bar foo; change bar'
12
13 hg debugrename foo
14 hg debugindex .hg/data/bar.i
@@ -0,0 +1,4 @@
1 foo renamed from bar:26d3ca0dfd18e44d796b564e38dd173c9668d3a9
2 rev offset length base linkrev nodeid p1 p2
3 0 0 6 0 0 26d3ca0dfd18 000000000000 000000000000
4 1 6 7 1 1 d267bddd54f7 26d3ca0dfd18 000000000000
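
For context: the "renamed from bar:<node>" line printed by debugrename comes from copy metadata stored at the front of foo's filelog revision text, and the debugindex output shows bar's two revisions (the copy source at rev 0, the "echo >> bar" change at rev 1). A minimal sketch of that metadata encoding, assuming the "\1\n" framing and the "copy"/"copyrev" keys used by filelog:

    # Illustrative only: layout of a filelog revision carrying copy metadata.
    def packmeta(meta, text):
        metatext = "".join(["%s: %s\n" % (k, v) for k, v in sorted(meta.items())])
        return "\1\n%s\1\n%s" % (metatext, text)

    packed = packmeta({"copy": "bar",
                       "copyrev": "26d3ca0dfd18e44d796b564e38dd173c9668d3a9"},
                      "bleh\n")
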
@@ -1,1868 +1,1871 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.realpath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.sopener = util.opener(self.path)
51 51 self.wopener = util.opener(self.root)
52 52
53 53 try:
54 54 self.ui.readconfig(self.join("hgrc"), self.root)
55 55 except IOError:
56 56 pass
57 57
58 58 v = self.ui.configrevlog()
59 59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 61 fl = v.get('flags', None)
62 62 flags = 0
63 63 if fl != None:
64 64 for x in fl.split():
65 65 flags |= revlog.flagstr(x)
66 66 elif self.revlogv1:
67 67 flags = revlog.REVLOG_DEFAULT_FLAGS
68 68
69 69 v = self.revlogversion | flags
70 70 self.manifest = manifest.manifest(self.sopener, v)
71 71 self.changelog = changelog.changelog(self.sopener, v)
72 72
73 73 # the changelog might not have the inline index flag
74 74 # on. If the format of the changelog is the same as found in
75 75 # .hgrc, apply any flags found in the .hgrc as well.
76 76         # Otherwise, just use the version from the changelog
77 77 v = self.changelog.version
78 78 if v == self.revlogversion:
79 79 v |= flags
80 80 self.revlogversion = v
81 81
82 82 self.tagscache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.encodepats = None
86 86 self.decodepats = None
87 87 self.transhandle = None
88 88
89 89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90 90
91 91 def url(self):
92 92 return 'file:' + self.root
93 93
94 94 def hook(self, name, throw=False, **args):
95 95 def callhook(hname, funcname):
96 96 '''call python hook. hook is callable object, looked up as
97 97 name in python module. if callable returns "true", hook
98 98 fails, else passes. if hook raises exception, treated as
99 99 hook failure. exception propagates if throw is "true".
100 100
101 101 reason for "true" meaning "hook failed" is so that
102 102 unmodified commands (e.g. mercurial.commands.update) can
103 103 be run as hooks without wrappers to convert return values.'''
104 104
105 105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
106 106 d = funcname.rfind('.')
107 107 if d == -1:
108 108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
109 109 % (hname, funcname))
110 110 modname = funcname[:d]
111 111 try:
112 112 obj = __import__(modname)
113 113 except ImportError:
114 114 try:
115 115 # extensions are loaded with hgext_ prefix
116 116 obj = __import__("hgext_%s" % modname)
117 117 except ImportError:
118 118 raise util.Abort(_('%s hook is invalid '
119 119 '(import of "%s" failed)') %
120 120 (hname, modname))
121 121 try:
122 122 for p in funcname.split('.')[1:]:
123 123 obj = getattr(obj, p)
124 124 except AttributeError, err:
125 125 raise util.Abort(_('%s hook is invalid '
126 126 '("%s" is not defined)') %
127 127 (hname, funcname))
128 128 if not callable(obj):
129 129 raise util.Abort(_('%s hook is invalid '
130 130 '("%s" is not callable)') %
131 131 (hname, funcname))
132 132 try:
133 133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
134 134 except (KeyboardInterrupt, util.SignalInterrupt):
135 135 raise
136 136 except Exception, exc:
137 137 if isinstance(exc, util.Abort):
138 138 self.ui.warn(_('error: %s hook failed: %s\n') %
139 139 (hname, exc.args[0]))
140 140 else:
141 141 self.ui.warn(_('error: %s hook raised an exception: '
142 142 '%s\n') % (hname, exc))
143 143 if throw:
144 144 raise
145 145 self.ui.print_exc()
146 146 return True
147 147 if r:
148 148 if throw:
149 149 raise util.Abort(_('%s hook failed') % hname)
150 150 self.ui.warn(_('warning: %s hook failed\n') % hname)
151 151 return r
152 152
153 153 def runhook(name, cmd):
154 154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
155 155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
156 156 r = util.system(cmd, environ=env, cwd=self.root)
157 157 if r:
158 158 desc, r = util.explain_exit(r)
159 159 if throw:
160 160 raise util.Abort(_('%s hook %s') % (name, desc))
161 161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
162 162 return r
163 163
164 164 r = False
165 165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
166 166 if hname.split(".", 1)[0] == name and cmd]
167 167 hooks.sort()
168 168 for hname, cmd in hooks:
169 169 if cmd.startswith('python:'):
170 170 r = callhook(hname, cmd[7:].strip()) or r
171 171 else:
172 172 r = runhook(hname, cmd) or r
173 173 return r
174 174
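
As the callhook docstring above spells out, a Python hook is any callable reached through the dotted path after the "python:" prefix; it is invoked with ui, repo, hooktype and the hook arguments as keywords, and a truthy return value (or an exception) marks failure. A minimal sketch, with a hypothetical module and hgrc stanza:

    # myhooks.py (hypothetical); enabled with:
    #   [hooks]
    #   pretxncommit.check = python:myhooks.check
    def check(ui, repo, hooktype, node=None, **kwargs):
        ui.note("checking %s in %s hook\n" % (node, hooktype))
        return False  # falsy return means the hook passed
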
175 175 tag_disallowed = ':\r\n'
176 176
177 177 def tag(self, name, node, message, local, user, date):
178 178 '''tag a revision with a symbolic name.
179 179
180 180 if local is True, the tag is stored in a per-repository file.
181 181 otherwise, it is stored in the .hgtags file, and a new
182 182 changeset is committed with the change.
183 183
184 184 keyword arguments:
185 185
186 186 local: whether to store tag in non-version-controlled file
187 187 (default False)
188 188
189 189 message: commit message to use if committing
190 190
191 191 user: name of user to use if committing
192 192
193 193 date: date tuple to use if committing'''
194 194
195 195 for c in self.tag_disallowed:
196 196 if c in name:
197 197 raise util.Abort(_('%r cannot be used in a tag name') % c)
198 198
199 199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
200 200
201 201 if local:
202 202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 203 self.hook('tag', node=hex(node), tag=name, local=local)
204 204 return
205 205
206 206 for x in self.status()[:5]:
207 207 if '.hgtags' in x:
208 208 raise util.Abort(_('working copy of .hgtags is changed '
209 209 '(please commit .hgtags manually)'))
210 210
211 211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
212 212 if self.dirstate.state('.hgtags') == '?':
213 213 self.add(['.hgtags'])
214 214
215 215 self.commit(['.hgtags'], message, user, date)
216 216 self.hook('tag', node=hex(node), tag=name, local=local)
217 217
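
A hypothetical caller of tag(), matching the signature above; with local=True the tag only lands in .hg/localtags and no changeset is committed:

    # Illustrative only; arguments per the docstring above.
    repo.tag('nightly', repo.changelog.tip(), 'Added tag nightly',
             True, 'someone@example.com', None)
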
218 218 def tags(self):
219 219 '''return a mapping of tag to node'''
220 220 if not self.tagscache:
221 221 self.tagscache = {}
222 222
223 223 def parsetag(line, context):
224 224 if not line:
225 225 return
226 226                 s = line.split(" ", 1)
227 227 if len(s) != 2:
228 228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 229 return
230 230 node, key = s
231 231 key = key.strip()
232 232 try:
233 233 bin_n = bin(node)
234 234 except TypeError:
235 235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 236 (context, node))
237 237 return
238 238 if bin_n not in self.changelog.nodemap:
239 239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 240 (context, key))
241 241 return
242 242 self.tagscache[key] = bin_n
243 243
244 244 # read the tags file from each head, ending with the tip,
245 245 # and add each tag found to the map, with "newer" ones
246 246 # taking precedence
247 247 f = None
248 248 for rev, node, fnode in self._hgtagsnodes():
249 249 f = (f and f.filectx(fnode) or
250 250 self.filectx('.hgtags', fileid=fnode))
251 251 count = 0
252 252 for l in f.data().splitlines():
253 253 count += 1
254 254 parsetag(l, _("%s, line %d") % (str(f), count))
255 255
256 256 try:
257 257 f = self.opener("localtags")
258 258 count = 0
259 259 for l in f:
260 260 count += 1
261 261 parsetag(l, _("localtags, line %d") % count)
262 262 except IOError:
263 263 pass
264 264
265 265 self.tagscache['tip'] = self.changelog.tip()
266 266
267 267 return self.tagscache
268 268
269 269 def _hgtagsnodes(self):
270 270 heads = self.heads()
271 271 heads.reverse()
272 272 last = {}
273 273 ret = []
274 274 for node in heads:
275 275 c = self.changectx(node)
276 276 rev = c.rev()
277 277 try:
278 278 fnode = c.filenode('.hgtags')
279 279 except repo.LookupError:
280 280 continue
281 281 ret.append((rev, node, fnode))
282 282 if fnode in last:
283 283 ret[last[fnode]] = None
284 284 last[fnode] = len(ret) - 1
285 285 return [item for item in ret if item]
286 286
287 287 def tagslist(self):
288 288 '''return a list of tags ordered by revision'''
289 289 l = []
290 290 for t, n in self.tags().items():
291 291 try:
292 292 r = self.changelog.rev(n)
293 293 except:
294 294 r = -2 # sort to the beginning of the list if unknown
295 295 l.append((r, t, n))
296 296 l.sort()
297 297 return [(t, n) for r, t, n in l]
298 298
299 299 def nodetags(self, node):
300 300 '''return the tags associated with a node'''
301 301 if not self.nodetagscache:
302 302 self.nodetagscache = {}
303 303 for t, n in self.tags().items():
304 304 self.nodetagscache.setdefault(n, []).append(t)
305 305 return self.nodetagscache.get(node, [])
306 306
307 307 def branchtags(self):
308 308 if self.branchcache != None:
309 309 return self.branchcache
310 310
311 311 self.branchcache = {} # avoid recursion in changectx
312 312
313 313 partial, last, lrev = self._readbranchcache()
314 314
315 315 tiprev = self.changelog.count() - 1
316 316 if lrev != tiprev:
317 317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 319
320 320 self.branchcache = partial
321 321 return self.branchcache
322 322
323 323 def _readbranchcache(self):
324 324 partial = {}
325 325 try:
326 326 f = self.opener("branches.cache")
327 327 lines = f.read().split('\n')
328 328 f.close()
329 329 last, lrev = lines.pop(0).rstrip().split(" ", 1)
330 330 last, lrev = bin(last), int(lrev)
331 331 if (lrev < self.changelog.count() and
332 332 self.changelog.node(lrev) == last): # sanity check
333 333 for l in lines:
334 334 if not l: continue
335 335 node, label = l.rstrip().split(" ", 1)
336 336 partial[label] = bin(node)
337 337 else: # invalidate the cache
338 338 last, lrev = nullid, nullrev
339 339 except IOError:
340 340 last, lrev = nullid, nullrev
341 341 return partial, last, lrev
342 342
343 343 def _writebranchcache(self, branches, tip, tiprev):
344 344 try:
345 345 f = self.opener("branches.cache", "w")
346 346 f.write("%s %s\n" % (hex(tip), tiprev))
347 347 for label, node in branches.iteritems():
348 348 f.write("%s %s\n" % (hex(node), label))
349 349 except IOError:
350 350 pass
351 351
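
The cache exchanged by _readbranchcache and _writebranchcache is plain text: a "tip-hex tip-rev" header line used for the sanity check, then one "node-hex label" line per branch. Hypothetical contents of .hg/branches.cache:

    f2fcd25f8a8d3e27e9f39aa45a23a329ecdb91c0 3675
    f2fcd25f8a8d3e27e9f39aa45a23a329ecdb91c0 default
    1c28f494dae69a2f8fc815059d257eccf3fcfe61 stable
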
352 352 def _updatebranchcache(self, partial, start, end):
353 353 for r in xrange(start, end):
354 354 c = self.changectx(r)
355 355 b = c.branch()
356 356 if b:
357 357 partial[b] = c.node()
358 358
359 359 def lookup(self, key):
360 360 if key == '.':
361 361 key = self.dirstate.parents()[0]
362 362 if key == nullid:
363 363 raise repo.RepoError(_("no revision checked out"))
364 364 n = self.changelog._match(key)
365 365 if n:
366 366 return n
367 367 if key in self.tags():
368 368 return self.tags()[key]
369 369 if key in self.branchtags():
370 370 return self.branchtags()[key]
371 371 n = self.changelog._partialmatch(key)
372 372 if n:
373 373 return n
374 374 raise repo.RepoError(_("unknown revision '%s'") % key)
375 375
376 376 def dev(self):
377 377 return os.lstat(self.path).st_dev
378 378
379 379 def local(self):
380 380 return True
381 381
382 382 def join(self, f):
383 383 return os.path.join(self.path, f)
384 384
385 385 def sjoin(self, f):
386 386 return os.path.join(self.path, f)
387 387
388 388 def wjoin(self, f):
389 389 return os.path.join(self.root, f)
390 390
391 391 def file(self, f):
392 392 if f[0] == '/':
393 393 f = f[1:]
394 394 return filelog.filelog(self.sopener, f, self.revlogversion)
395 395
396 396 def changectx(self, changeid=None):
397 397 return context.changectx(self, changeid)
398 398
399 399 def workingctx(self):
400 400 return context.workingctx(self)
401 401
402 402 def parents(self, changeid=None):
403 403 '''
404 404 get list of changectxs for parents of changeid or working directory
405 405 '''
406 406 if changeid is None:
407 407 pl = self.dirstate.parents()
408 408 else:
409 409 n = self.changelog.lookup(changeid)
410 410 pl = self.changelog.parents(n)
411 411 if pl[1] == nullid:
412 412 return [self.changectx(pl[0])]
413 413 return [self.changectx(pl[0]), self.changectx(pl[1])]
414 414
415 415 def filectx(self, path, changeid=None, fileid=None):
416 416 """changeid can be a changeset revision, node, or tag.
417 417 fileid can be a file revision or node."""
418 418 return context.filectx(self, path, changeid, fileid)
419 419
420 420 def getcwd(self):
421 421 return self.dirstate.getcwd()
422 422
423 423 def wfile(self, f, mode='r'):
424 424 return self.wopener(f, mode)
425 425
426 426 def wread(self, filename):
427 427 if self.encodepats == None:
428 428 l = []
429 429 for pat, cmd in self.ui.configitems("encode"):
430 430 mf = util.matcher(self.root, "", [pat], [], [])[1]
431 431 l.append((mf, cmd))
432 432 self.encodepats = l
433 433
434 434 data = self.wopener(filename, 'r').read()
435 435
436 436 for mf, cmd in self.encodepats:
437 437 if mf(filename):
438 438 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
439 439 data = util.filter(data, cmd)
440 440 break
441 441
442 442 return data
443 443
444 444 def wwrite(self, filename, data, fd=None):
445 445 if self.decodepats == None:
446 446 l = []
447 447 for pat, cmd in self.ui.configitems("decode"):
448 448 mf = util.matcher(self.root, "", [pat], [], [])[1]
449 449 l.append((mf, cmd))
450 450 self.decodepats = l
451 451
452 452 for mf, cmd in self.decodepats:
453 453 if mf(filename):
454 454 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
455 455 data = util.filter(data, cmd)
456 456 break
457 457
458 458 if fd:
459 459 return fd.write(data)
460 460 return self.wopener(filename, 'w').write(data)
461 461
462 462 def transaction(self):
463 463 tr = self.transhandle
464 464 if tr != None and tr.running():
465 465 return tr.nest()
466 466
467 467 # save dirstate for rollback
468 468 try:
469 469 ds = self.opener("dirstate").read()
470 470 except IOError:
471 471 ds = ""
472 472 self.opener("journal.dirstate", "w").write(ds)
473 473
474 474 tr = transaction.transaction(self.ui.warn, self.sopener,
475 475 self.sjoin("journal"),
476 476 aftertrans(self.path))
477 477 self.transhandle = tr
478 478 return tr
479 479
480 480 def recover(self):
481 481 l = self.lock()
482 482 if os.path.exists(self.sjoin("journal")):
483 483 self.ui.status(_("rolling back interrupted transaction\n"))
484 484 transaction.rollback(self.sopener, self.sjoin("journal"))
485 485 self.reload()
486 486 return True
487 487 else:
488 488 self.ui.warn(_("no interrupted transaction available\n"))
489 489 return False
490 490
491 491 def rollback(self, wlock=None):
492 492 if not wlock:
493 493 wlock = self.wlock()
494 494 l = self.lock()
495 495 if os.path.exists(self.sjoin("undo")):
496 496 self.ui.status(_("rolling back last transaction\n"))
497 497 transaction.rollback(self.sopener, self.sjoin("undo"))
498 498 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
499 499 self.reload()
500 500 self.wreload()
501 501 else:
502 502 self.ui.warn(_("no rollback information available\n"))
503 503
504 504 def wreload(self):
505 505 self.dirstate.read()
506 506
507 507 def reload(self):
508 508 self.changelog.load()
509 509 self.manifest.load()
510 510 self.tagscache = None
511 511 self.nodetagscache = None
512 512
513 513 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
514 514 desc=None):
515 515 try:
516 516 l = lock.lock(lockname, 0, releasefn, desc=desc)
517 517 except lock.LockHeld, inst:
518 518 if not wait:
519 519 raise
520 520 self.ui.warn(_("waiting for lock on %s held by %s\n") %
521 521 (desc, inst.args[0]))
522 522 # default to 600 seconds timeout
523 523 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
524 524 releasefn, desc=desc)
525 525 if acquirefn:
526 526 acquirefn()
527 527 return l
528 528
529 529 def lock(self, wait=1):
530 530 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
531 531 desc=_('repository %s') % self.origroot)
532 532
533 533 def wlock(self, wait=1):
534 534 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
535 535 self.wreload,
536 536 desc=_('working directory of %s') % self.origroot)
537 537
538 538 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
539 539 """
540 540 commit an individual file as part of a larger transaction
541 541 """
542 542
543 543 t = self.wread(fn)
544 544 fl = self.file(fn)
545 545 fp1 = manifest1.get(fn, nullid)
546 546 fp2 = manifest2.get(fn, nullid)
547 547
548 548 meta = {}
549 549 cp = self.dirstate.copied(fn)
550 550 if cp:
551 551 meta["copy"] = cp
552 552 if not manifest2: # not a branch merge
553 553 meta["copyrev"] = hex(manifest1.get(cp, nullid))
554 554 fp2 = nullid
555 555 elif fp2 != nullid: # copied on remote side
556 556 meta["copyrev"] = hex(manifest1.get(cp, nullid))
557 557 else: # copied on local side, reversed
558 558 meta["copyrev"] = hex(manifest2.get(cp))
559 559 fp2 = nullid
560 560 self.ui.debug(_(" %s: copy %s:%s\n") %
561 561 (fn, cp, meta["copyrev"]))
562 562 fp1 = nullid
563 563 elif fp2 != nullid:
564 564 # is one parent an ancestor of the other?
565 565 fpa = fl.ancestor(fp1, fp2)
566 566 if fpa == fp1:
567 567 fp1, fp2 = fp2, nullid
568 568 elif fpa == fp2:
569 569 fp2 = nullid
570 570
571 571 # is the file unmodified from the parent? report existing entry
572 572 if fp2 == nullid and not fl.cmp(fp1, t):
573 573 return fp1
574 574
575 575 changelist.append(fn)
576 576 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
577 577
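
The new test above exercises the first copy branch in filecommit: "hg cp bar foo" with no merge means manifest2 is empty, so the copy source and its filenode in the first parent's manifest are recorded and fp2 is cleared. A hypothetical trace for the commit of foo in that test:

    # cp        = 'bar'                     (from dirstate.copied)
    # manifest2 = {}                        (not a branch merge)
    # meta      = {'copy': 'bar',
    #              'copyrev': '26d3ca0dfd18e44d796b564e38dd173c9668d3a9'}
    # fp2       = nullid
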
578 578 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
579 579 if p1 is None:
580 580 p1, p2 = self.dirstate.parents()
581 581 return self.commit(files=files, text=text, user=user, date=date,
582 582 p1=p1, p2=p2, wlock=wlock)
583 583
584 584 def commit(self, files=None, text="", user=None, date=None,
585 585 match=util.always, force=False, lock=None, wlock=None,
586 586 force_editor=False, p1=None, p2=None, extra={}):
587 587
588 588 commit = []
589 589 remove = []
590 590 changed = []
591 591 use_dirstate = (p1 is None) # not rawcommit
592 592 extra = extra.copy()
593 593
594 594 if use_dirstate:
595 595 if files:
596 596 for f in files:
597 597 s = self.dirstate.state(f)
598 598 if s in 'nmai':
599 599 commit.append(f)
600 600 elif s == 'r':
601 601 remove.append(f)
602 602 else:
603 603 self.ui.warn(_("%s not tracked!\n") % f)
604 604 else:
605 605 changes = self.status(match=match)[:5]
606 606 modified, added, removed, deleted, unknown = changes
607 607 commit = modified + added
608 608 remove = removed
609 609 else:
610 610 commit = files
611 611
612 612 if use_dirstate:
613 613 p1, p2 = self.dirstate.parents()
614 614 update_dirstate = True
615 615 else:
616 616 p1, p2 = p1, p2 or nullid
617 617 update_dirstate = (self.dirstate.parents()[0] == p1)
618 618
619 619 c1 = self.changelog.read(p1)
620 620 c2 = self.changelog.read(p2)
621 621 m1 = self.manifest.read(c1[0]).copy()
622 622 m2 = self.manifest.read(c2[0])
623 623
624 624 if use_dirstate:
625 625 branchname = self.workingctx().branch()
626 626 else:
627 627 branchname = ""
628 628
629 629 if use_dirstate:
630 630 oldname = c1[5].get("branch", "")
631 631 if not commit and not remove and not force and p2 == nullid and \
632 632 branchname == oldname:
633 633 self.ui.status(_("nothing changed\n"))
634 634 return None
635 635
636 636 xp1 = hex(p1)
637 637 if p2 == nullid: xp2 = ''
638 638 else: xp2 = hex(p2)
639 639
640 640 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
641 641
642 642 if not wlock:
643 643 wlock = self.wlock()
644 644 if not lock:
645 645 lock = self.lock()
646 646 tr = self.transaction()
647 647
648 648 # check in files
649 new = []
649 new = {}
650 650 linkrev = self.changelog.count()
651 651 commit.sort()
652 652 for f in commit:
653 653 self.ui.note(f + "\n")
654 654 try:
655 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
655 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
656 656 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
657 new.append(f)
658 657 except IOError:
659 658 if use_dirstate:
660 659 self.ui.warn(_("trouble committing %s!\n") % f)
661 660 raise
662 661 else:
663 662 remove.append(f)
664 663
665 664 # update manifest
665 m1.update(new)
666 666 remove.sort()
667 667
668 668 for f in remove:
669 669 if f in m1:
670 670 del m1[f]
671 671 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
672 672
673 673 # add changeset
674 new = new.keys()
675 new.sort()
676
674 677 user = user or self.ui.username()
675 678 if not text or force_editor:
676 679 edittext = []
677 680 if text:
678 681 edittext.append(text)
679 682 edittext.append("")
680 683 if p2 != nullid:
681 684 edittext.append("HG: branch merge")
682 685 edittext.extend(["HG: changed %s" % f for f in changed])
683 686 edittext.extend(["HG: removed %s" % f for f in remove])
684 687 if not changed and not remove:
685 688 edittext.append("HG: no files changed")
686 689 edittext.append("")
687 690 # run editor in the repository root
688 691 olddir = os.getcwd()
689 692 os.chdir(self.root)
690 693 text = self.ui.edit("\n".join(edittext), user)
691 694 os.chdir(olddir)
692 695
693 696 lines = [line.rstrip() for line in text.rstrip().splitlines()]
694 697 while lines and not lines[0]:
695 698 del lines[0]
696 699 if not lines:
697 700 return None
698 701 text = '\n'.join(lines)
699 702 if branchname:
700 703 extra["branch"] = branchname
701 704 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
702 705 user, date, extra)
703 706 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
704 707 parent2=xp2)
705 708 tr.close()
706 709
707 710 if use_dirstate or update_dirstate:
708 711 self.dirstate.setparents(n)
709 712 if use_dirstate:
710 713 self.dirstate.update(new, "n")
711 714 self.dirstate.forget(remove)
712 715
713 716 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
714 717 return n
715 718
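
The substantive change in this hunk is visible above: "new" becomes a dict mapping filename to filenode, so the manifest is updated in a single m1.update(new) call and the sorted key list is reused for the changelog entry. A minimal sketch of the pattern with hypothetical values:

    new = {}
    for f in ['b', 'a']:
        new[f] = 'filenode-of-%s' % f   # hypothetical filenode
    m1 = {}
    m1.update(new)                      # apply to the manifest in one step
    new = new.keys()
    new.sort()                          # ['a', 'b'] for the changelog entry
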
716 719 def walk(self, node=None, files=[], match=util.always, badmatch=None):
717 720 '''
718 721 walk recursively through the directory tree or a given
719 722 changeset, finding all files matched by the match
720 723 function
721 724
722 725 results are yielded in a tuple (src, filename), where src
723 726 is one of:
724 727 'f' the file was found in the directory tree
725 728 'm' the file was only in the dirstate and not in the tree
726 729 'b' file was not found and matched badmatch
727 730 '''
728 731
729 732 if node:
730 733 fdict = dict.fromkeys(files)
731 734 for fn in self.manifest.read(self.changelog.read(node)[0]):
732 735 for ffn in fdict:
733 736 # match if the file is the exact name or a directory
734 737 if ffn == fn or fn.startswith("%s/" % ffn):
735 738 del fdict[ffn]
736 739 break
737 740 if match(fn):
738 741 yield 'm', fn
739 742 for fn in fdict:
740 743 if badmatch and badmatch(fn):
741 744 if match(fn):
742 745 yield 'b', fn
743 746 else:
744 747 self.ui.warn(_('%s: No such file in rev %s\n') % (
745 748 util.pathto(self.getcwd(), fn), short(node)))
746 749 else:
747 750 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
748 751 yield src, fn
749 752
750 753 def status(self, node1=None, node2=None, files=[], match=util.always,
751 754 wlock=None, list_ignored=False, list_clean=False):
752 755         """return status of files between two nodes, or a node and the working directory
753 756
754 757 If node1 is None, use the first dirstate parent instead.
755 758 If node2 is None, compare node1 with working directory.
756 759 """
757 760
758 761 def fcmp(fn, mf):
759 762 t1 = self.wread(fn)
760 763 return self.file(fn).cmp(mf.get(fn, nullid), t1)
761 764
762 765 def mfmatches(node):
763 766 change = self.changelog.read(node)
764 767 mf = self.manifest.read(change[0]).copy()
765 768 for fn in mf.keys():
766 769 if not match(fn):
767 770 del mf[fn]
768 771 return mf
769 772
770 773 modified, added, removed, deleted, unknown = [], [], [], [], []
771 774 ignored, clean = [], []
772 775
773 776 compareworking = False
774 777 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
775 778 compareworking = True
776 779
777 780 if not compareworking:
778 781 # read the manifest from node1 before the manifest from node2,
779 782 # so that we'll hit the manifest cache if we're going through
780 783 # all the revisions in parent->child order.
781 784 mf1 = mfmatches(node1)
782 785
783 786 # are we comparing the working directory?
784 787 if not node2:
785 788 if not wlock:
786 789 try:
787 790 wlock = self.wlock(wait=0)
788 791 except lock.LockException:
789 792 wlock = None
790 793 (lookup, modified, added, removed, deleted, unknown,
791 794 ignored, clean) = self.dirstate.status(files, match,
792 795 list_ignored, list_clean)
793 796
794 797 # are we comparing working dir against its parent?
795 798 if compareworking:
796 799 if lookup:
797 800 # do a full compare of any files that might have changed
798 801 mf2 = mfmatches(self.dirstate.parents()[0])
799 802 for f in lookup:
800 803 if fcmp(f, mf2):
801 804 modified.append(f)
802 805 else:
803 806 clean.append(f)
804 807 if wlock is not None:
805 808 self.dirstate.update([f], "n")
806 809 else:
807 810 # we are comparing working dir against non-parent
808 811 # generate a pseudo-manifest for the working dir
809 812 # XXX: create it in dirstate.py ?
810 813 mf2 = mfmatches(self.dirstate.parents()[0])
811 814 for f in lookup + modified + added:
812 815 mf2[f] = ""
813 816 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
814 817 for f in removed:
815 818 if f in mf2:
816 819 del mf2[f]
817 820 else:
818 821 # we are comparing two revisions
819 822 mf2 = mfmatches(node2)
820 823
821 824 if not compareworking:
822 825 # flush lists from dirstate before comparing manifests
823 826 modified, added, clean = [], [], []
824 827
825 828 # make sure to sort the files so we talk to the disk in a
826 829 # reasonable order
827 830 mf2keys = mf2.keys()
828 831 mf2keys.sort()
829 832 for fn in mf2keys:
830 833 if mf1.has_key(fn):
831 834 if mf1.flags(fn) != mf2.flags(fn) or \
832 835 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
833 836 modified.append(fn)
834 837 elif list_clean:
835 838 clean.append(fn)
836 839 del mf1[fn]
837 840 else:
838 841 added.append(fn)
839 842
840 843 removed = mf1.keys()
841 844
842 845 # sort and return results:
843 846 for l in modified, added, removed, deleted, unknown, ignored, clean:
844 847 l.sort()
845 848 return (modified, added, removed, deleted, unknown, ignored, clean)
846 849
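
A hypothetical caller unpacking the seven lists returned above, in the order of the return statement:

    (modified, added, removed, deleted, unknown,
     ignored, clean) = repo.status(list_ignored=True, list_clean=True)
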
847 850 def add(self, list, wlock=None):
848 851 if not wlock:
849 852 wlock = self.wlock()
850 853 for f in list:
851 854 p = self.wjoin(f)
852 855 if not os.path.exists(p):
853 856 self.ui.warn(_("%s does not exist!\n") % f)
854 857 elif not os.path.isfile(p):
855 858 self.ui.warn(_("%s not added: only files supported currently\n")
856 859 % f)
857 860 elif self.dirstate.state(f) in 'an':
858 861 self.ui.warn(_("%s already tracked!\n") % f)
859 862 else:
860 863 self.dirstate.update([f], "a")
861 864
862 865 def forget(self, list, wlock=None):
863 866 if not wlock:
864 867 wlock = self.wlock()
865 868 for f in list:
866 869 if self.dirstate.state(f) not in 'ai':
867 870 self.ui.warn(_("%s not added!\n") % f)
868 871 else:
869 872 self.dirstate.forget([f])
870 873
871 874 def remove(self, list, unlink=False, wlock=None):
872 875 if unlink:
873 876 for f in list:
874 877 try:
875 878 util.unlink(self.wjoin(f))
876 879 except OSError, inst:
877 880 if inst.errno != errno.ENOENT:
878 881 raise
879 882 if not wlock:
880 883 wlock = self.wlock()
881 884 for f in list:
882 885 p = self.wjoin(f)
883 886 if os.path.exists(p):
884 887 self.ui.warn(_("%s still exists!\n") % f)
885 888 elif self.dirstate.state(f) == 'a':
886 889 self.dirstate.forget([f])
887 890 elif f not in self.dirstate:
888 891 self.ui.warn(_("%s not tracked!\n") % f)
889 892 else:
890 893 self.dirstate.update([f], "r")
891 894
892 895 def undelete(self, list, wlock=None):
893 896 p = self.dirstate.parents()[0]
894 897 mn = self.changelog.read(p)[0]
895 898 m = self.manifest.read(mn)
896 899 if not wlock:
897 900 wlock = self.wlock()
898 901 for f in list:
899 902 if self.dirstate.state(f) not in "r":
900 903 self.ui.warn("%s not removed!\n" % f)
901 904 else:
902 905 t = self.file(f).read(m[f])
903 906 self.wwrite(f, t)
904 907 util.set_exec(self.wjoin(f), m.execf(f))
905 908 self.dirstate.update([f], "n")
906 909
907 910 def copy(self, source, dest, wlock=None):
908 911 p = self.wjoin(dest)
909 912 if not os.path.exists(p):
910 913 self.ui.warn(_("%s does not exist!\n") % dest)
911 914 elif not os.path.isfile(p):
912 915 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
913 916 else:
914 917 if not wlock:
915 918 wlock = self.wlock()
916 919 if self.dirstate.state(dest) == '?':
917 920 self.dirstate.update([dest], "a")
918 921 self.dirstate.copy(source, dest)
919 922
920 923 def heads(self, start=None):
921 924 heads = self.changelog.heads(start)
922 925 # sort the output in rev descending order
923 926 heads = [(-self.changelog.rev(h), h) for h in heads]
924 927 heads.sort()
925 928 return [n for (r, n) in heads]
926 929
927 930 # branchlookup returns a dict giving a list of branches for
928 931 # each head. A branch is defined as the tag of a node or
929 932 # the branch of the node's parents. If a node has multiple
930 933 # branch tags, tags are eliminated if they are visible from other
931 934 # branch tags.
932 935 #
933 936 # So, for this graph: a->b->c->d->e
934 937 # \ /
935 938 # aa -----/
936 939 # a has tag 2.6.12
937 940 # d has tag 2.6.13
938 941 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
939 942 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
940 943 # from the list.
941 944 #
942 945 # It is possible that more than one head will have the same branch tag.
943 946 # callers need to check the result for multiple heads under the same
944 947 # branch tag if that is a problem for them (ie checkout of a specific
945 948 # branch).
946 949 #
947 950 # passing in a specific branch will limit the depth of the search
948 951 # through the parents. It won't limit the branches returned in the
949 952 # result though.
950 953 def branchlookup(self, heads=None, branch=None):
951 954 if not heads:
952 955 heads = self.heads()
953 956 headt = [ h for h in heads ]
954 957 chlog = self.changelog
955 958 branches = {}
956 959 merges = []
957 960 seenmerge = {}
958 961
959 962 # traverse the tree once for each head, recording in the branches
960 963 # dict which tags are visible from this head. The branches
961 964 # dict also records which tags are visible from each tag
962 965 # while we traverse.
963 966 while headt or merges:
964 967 if merges:
965 968 n, found = merges.pop()
966 969 visit = [n]
967 970 else:
968 971 h = headt.pop()
969 972 visit = [h]
970 973 found = [h]
971 974 seen = {}
972 975 while visit:
973 976 n = visit.pop()
974 977 if n in seen:
975 978 continue
976 979 pp = chlog.parents(n)
977 980 tags = self.nodetags(n)
978 981 if tags:
979 982 for x in tags:
980 983 if x == 'tip':
981 984 continue
982 985 for f in found:
983 986 branches.setdefault(f, {})[n] = 1
984 987 branches.setdefault(n, {})[n] = 1
985 988 break
986 989 if n not in found:
987 990 found.append(n)
988 991 if branch in tags:
989 992 continue
990 993 seen[n] = 1
991 994 if pp[1] != nullid and n not in seenmerge:
992 995 merges.append((pp[1], [x for x in found]))
993 996 seenmerge[n] = 1
994 997 if pp[0] != nullid:
995 998 visit.append(pp[0])
996 999 # traverse the branches dict, eliminating branch tags from each
997 1000 # head that are visible from another branch tag for that head.
998 1001 out = {}
999 1002 viscache = {}
1000 1003 for h in heads:
1001 1004 def visible(node):
1002 1005 if node in viscache:
1003 1006 return viscache[node]
1004 1007 ret = {}
1005 1008 visit = [node]
1006 1009 while visit:
1007 1010 x = visit.pop()
1008 1011 if x in viscache:
1009 1012 ret.update(viscache[x])
1010 1013 elif x not in ret:
1011 1014 ret[x] = 1
1012 1015 if x in branches:
1013 1016 visit[len(visit):] = branches[x].keys()
1014 1017 viscache[node] = ret
1015 1018 return ret
1016 1019 if h not in branches:
1017 1020 continue
1018 1021 # O(n^2), but somewhat limited. This only searches the
1019 1022 # tags visible from a specific head, not all the tags in the
1020 1023 # whole repo.
1021 1024 for b in branches[h]:
1022 1025 vis = False
1023 1026 for bb in branches[h].keys():
1024 1027 if b != bb:
1025 1028 if b in visible(bb):
1026 1029 vis = True
1027 1030 break
1028 1031 if not vis:
1029 1032 l = out.setdefault(h, [])
1030 1033 l[len(l):] = self.nodetags(b)
1031 1034 return out
1032 1035
1033 1036 def branches(self, nodes):
1034 1037 if not nodes:
1035 1038 nodes = [self.changelog.tip()]
1036 1039 b = []
1037 1040 for n in nodes:
1038 1041 t = n
1039 1042 while 1:
1040 1043 p = self.changelog.parents(n)
1041 1044 if p[1] != nullid or p[0] == nullid:
1042 1045 b.append((t, n, p[0], p[1]))
1043 1046 break
1044 1047 n = p[0]
1045 1048 return b
1046 1049
1047 1050 def between(self, pairs):
1048 1051 r = []
1049 1052
1050 1053 for top, bottom in pairs:
1051 1054 n, l, i = top, [], 0
1052 1055 f = 1
1053 1056
1054 1057 while n != bottom:
1055 1058 p = self.changelog.parents(n)[0]
1056 1059 if i == f:
1057 1060 l.append(n)
1058 1061 f = f * 2
1059 1062 n = p
1060 1063 i += 1
1061 1064
1062 1065 r.append(l)
1063 1066
1064 1067 return r
1065 1068
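
between() walks from each top toward its bottom and records the nodes that lie 1, 2, 4, 8, ... first-parent steps below the top; this exponential sampling is what lets findincoming below binary-search a linear stretch of history. A self-contained sketch, with integers standing in for nodes (parent of k is k-1 in this hypothetical linear history):

    def between_sketch(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:
                l.append(n)
                f *= 2
            n -= 1  # step to the first parent
            i += 1
        return l

    between_sketch(10, 0)  # -> [9, 8, 6, 2]
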
1066 1069 def findincoming(self, remote, base=None, heads=None, force=False):
1067 1070 """Return list of roots of the subsets of missing nodes from remote
1068 1071
1069 1072 If base dict is specified, assume that these nodes and their parents
1070 1073 exist on the remote side and that no child of a node of base exists
1071 1074 in both remote and self.
1072 1075         Furthermore, base will be updated to include the nodes that exist
1073 1076         in both self and remote but none of whose children do.
1074 1077 If a list of heads is specified, return only nodes which are heads
1075 1078 or ancestors of these heads.
1076 1079
1077 1080 All the ancestors of base are in self and in remote.
1078 1081 All the descendants of the list returned are missing in self.
1079 1082 (and so we know that the rest of the nodes are missing in remote, see
1080 1083 outgoing)
1081 1084 """
1082 1085 m = self.changelog.nodemap
1083 1086 search = []
1084 1087 fetch = {}
1085 1088 seen = {}
1086 1089 seenbranch = {}
1087 1090 if base == None:
1088 1091 base = {}
1089 1092
1090 1093 if not heads:
1091 1094 heads = remote.heads()
1092 1095
1093 1096 if self.changelog.tip() == nullid:
1094 1097 base[nullid] = 1
1095 1098 if heads != [nullid]:
1096 1099 return [nullid]
1097 1100 return []
1098 1101
1099 1102 # assume we're closer to the tip than the root
1100 1103 # and start by examining the heads
1101 1104 self.ui.status(_("searching for changes\n"))
1102 1105
1103 1106 unknown = []
1104 1107 for h in heads:
1105 1108 if h not in m:
1106 1109 unknown.append(h)
1107 1110 else:
1108 1111 base[h] = 1
1109 1112
1110 1113 if not unknown:
1111 1114 return []
1112 1115
1113 1116 req = dict.fromkeys(unknown)
1114 1117 reqcnt = 0
1115 1118
1116 1119 # search through remote branches
1117 1120 # a 'branch' here is a linear segment of history, with four parts:
1118 1121 # head, root, first parent, second parent
1119 1122 # (a branch always has two parents (or none) by definition)
1120 1123 unknown = remote.branches(unknown)
1121 1124 while unknown:
1122 1125 r = []
1123 1126 while unknown:
1124 1127 n = unknown.pop(0)
1125 1128 if n[0] in seen:
1126 1129 continue
1127 1130
1128 1131 self.ui.debug(_("examining %s:%s\n")
1129 1132 % (short(n[0]), short(n[1])))
1130 1133 if n[0] == nullid: # found the end of the branch
1131 1134 pass
1132 1135 elif n in seenbranch:
1133 1136 self.ui.debug(_("branch already found\n"))
1134 1137 continue
1135 1138 elif n[1] and n[1] in m: # do we know the base?
1136 1139 self.ui.debug(_("found incomplete branch %s:%s\n")
1137 1140 % (short(n[0]), short(n[1])))
1138 1141 search.append(n) # schedule branch range for scanning
1139 1142 seenbranch[n] = 1
1140 1143 else:
1141 1144 if n[1] not in seen and n[1] not in fetch:
1142 1145 if n[2] in m and n[3] in m:
1143 1146 self.ui.debug(_("found new changeset %s\n") %
1144 1147 short(n[1]))
1145 1148 fetch[n[1]] = 1 # earliest unknown
1146 1149 for p in n[2:4]:
1147 1150 if p in m:
1148 1151 base[p] = 1 # latest known
1149 1152
1150 1153 for p in n[2:4]:
1151 1154 if p not in req and p not in m:
1152 1155 r.append(p)
1153 1156 req[p] = 1
1154 1157 seen[n[0]] = 1
1155 1158
1156 1159 if r:
1157 1160 reqcnt += 1
1158 1161 self.ui.debug(_("request %d: %s\n") %
1159 1162 (reqcnt, " ".join(map(short, r))))
1160 1163 for p in xrange(0, len(r), 10):
1161 1164 for b in remote.branches(r[p:p+10]):
1162 1165 self.ui.debug(_("received %s:%s\n") %
1163 1166 (short(b[0]), short(b[1])))
1164 1167 unknown.append(b)
1165 1168
1166 1169 # do binary search on the branches we found
1167 1170 while search:
1168 1171 n = search.pop(0)
1169 1172 reqcnt += 1
1170 1173 l = remote.between([(n[0], n[1])])[0]
1171 1174 l.append(n[1])
1172 1175 p = n[0]
1173 1176 f = 1
1174 1177 for i in l:
1175 1178 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1176 1179 if i in m:
1177 1180 if f <= 2:
1178 1181 self.ui.debug(_("found new branch changeset %s\n") %
1179 1182 short(p))
1180 1183 fetch[p] = 1
1181 1184 base[i] = 1
1182 1185 else:
1183 1186 self.ui.debug(_("narrowed branch search to %s:%s\n")
1184 1187 % (short(p), short(i)))
1185 1188 search.append((p, i))
1186 1189 break
1187 1190 p, f = i, f * 2
1188 1191
1189 1192 # sanity check our fetch list
1190 1193 for f in fetch.keys():
1191 1194 if f in m:
1192 1195                 raise repo.RepoError(_("already have changeset ") + short(f))
1193 1196
1194 1197 if base.keys() == [nullid]:
1195 1198 if force:
1196 1199 self.ui.warn(_("warning: repository is unrelated\n"))
1197 1200 else:
1198 1201 raise util.Abort(_("repository is unrelated"))
1199 1202
1200 1203 self.ui.debug(_("found new changesets starting at ") +
1201 1204 " ".join([short(f) for f in fetch]) + "\n")
1202 1205
1203 1206 self.ui.debug(_("%d total queries\n") % reqcnt)
1204 1207
1205 1208 return fetch.keys()
1206 1209
1207 1210 def findoutgoing(self, remote, base=None, heads=None, force=False):
1208 1211 """Return list of nodes that are roots of subsets not in remote
1209 1212
1210 1213 If base dict is specified, assume that these nodes and their parents
1211 1214 exist on the remote side.
1212 1215 If a list of heads is specified, return only nodes which are heads
1213 1216 or ancestors of these heads, and return a second element which
1214 1217 contains all remote heads which get new children.
1215 1218 """
1216 1219 if base == None:
1217 1220 base = {}
1218 1221 self.findincoming(remote, base, heads, force=force)
1219 1222
1220 1223 self.ui.debug(_("common changesets up to ")
1221 1224 + " ".join(map(short, base.keys())) + "\n")
1222 1225
1223 1226 remain = dict.fromkeys(self.changelog.nodemap)
1224 1227
1225 1228 # prune everything remote has from the tree
1226 1229 del remain[nullid]
1227 1230 remove = base.keys()
1228 1231 while remove:
1229 1232 n = remove.pop(0)
1230 1233 if n in remain:
1231 1234 del remain[n]
1232 1235 for p in self.changelog.parents(n):
1233 1236 remove.append(p)
1234 1237
1235 1238 # find every node whose parents have been pruned
1236 1239 subset = []
1237 1240 # find every remote head that will get new children
1238 1241 updated_heads = {}
1239 1242 for n in remain:
1240 1243 p1, p2 = self.changelog.parents(n)
1241 1244 if p1 not in remain and p2 not in remain:
1242 1245 subset.append(n)
1243 1246 if heads:
1244 1247 if p1 in heads:
1245 1248 updated_heads[p1] = True
1246 1249 if p2 in heads:
1247 1250 updated_heads[p2] = True
1248 1251
1249 1252 # this is the set of all roots we have to push
1250 1253 if heads:
1251 1254 return subset, updated_heads.keys()
1252 1255 else:
1253 1256 return subset
1254 1257
1255 1258 def pull(self, remote, heads=None, force=False, lock=None):
1256 1259 mylock = False
1257 1260 if not lock:
1258 1261 lock = self.lock()
1259 1262 mylock = True
1260 1263
1261 1264 try:
1262 1265 fetch = self.findincoming(remote, force=force)
1263 1266 if fetch == [nullid]:
1264 1267 self.ui.status(_("requesting all changes\n"))
1265 1268
1266 1269 if not fetch:
1267 1270 self.ui.status(_("no changes found\n"))
1268 1271 return 0
1269 1272
1270 1273 if heads is None:
1271 1274 cg = remote.changegroup(fetch, 'pull')
1272 1275 else:
1273 1276 if 'changegroupsubset' not in remote.capabilities:
1274 1277                     raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1275 1278 cg = remote.changegroupsubset(fetch, heads, 'pull')
1276 1279 return self.addchangegroup(cg, 'pull', remote.url())
1277 1280 finally:
1278 1281 if mylock:
1279 1282 lock.release()
1280 1283
1281 1284 def push(self, remote, force=False, revs=None):
1282 1285 # there are two ways to push to remote repo:
1283 1286 #
1284 1287 # addchangegroup assumes local user can lock remote
1285 1288 # repo (local filesystem, old ssh servers).
1286 1289 #
1287 1290 # unbundle assumes local user cannot lock remote repo (new ssh
1288 1291 # servers, http servers).
1289 1292
1290 1293 if remote.capable('unbundle'):
1291 1294 return self.push_unbundle(remote, force, revs)
1292 1295 return self.push_addchangegroup(remote, force, revs)
1293 1296
1294 1297 def prepush(self, remote, force, revs):
1295 1298 base = {}
1296 1299 remote_heads = remote.heads()
1297 1300 inc = self.findincoming(remote, base, remote_heads, force=force)
1298 1301 if not force and inc:
1299 1302 self.ui.warn(_("abort: unsynced remote changes!\n"))
1300 1303 self.ui.status(_("(did you forget to sync?"
1301 1304 " use push -f to force)\n"))
1302 1305 return None, 1
1303 1306
1304 1307 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1305 1308 if revs is not None:
1306 1309 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1307 1310 else:
1308 1311 bases, heads = update, self.changelog.heads()
1309 1312
1310 1313 if not bases:
1311 1314 self.ui.status(_("no changes found\n"))
1312 1315 return None, 1
1313 1316 elif not force:
1314 1317 # FIXME we don't properly detect creation of new heads
1315 1318 # in the push -r case, assume the user knows what he's doing
1316 1319 if not revs and len(remote_heads) < len(heads) \
1317 1320 and remote_heads != [nullid]:
1318 1321 self.ui.warn(_("abort: push creates new remote branches!\n"))
1319 1322 self.ui.status(_("(did you forget to merge?"
1320 1323 " use push -f to force)\n"))
1321 1324 return None, 1
1322 1325
1323 1326 if revs is None:
1324 1327 cg = self.changegroup(update, 'push')
1325 1328 else:
1326 1329 cg = self.changegroupsubset(update, revs, 'push')
1327 1330 return cg, remote_heads
1328 1331
1329 1332 def push_addchangegroup(self, remote, force, revs):
1330 1333 lock = remote.lock()
1331 1334
1332 1335 ret = self.prepush(remote, force, revs)
1333 1336 if ret[0] is not None:
1334 1337 cg, remote_heads = ret
1335 1338 return remote.addchangegroup(cg, 'push', self.url())
1336 1339 return ret[1]
1337 1340
1338 1341 def push_unbundle(self, remote, force, revs):
1339 1342 # local repo finds heads on server, finds out what revs it
1340 1343 # must push. once revs transferred, if server finds it has
1341 1344 # different heads (someone else won commit/push race), server
1342 1345 # aborts.
1343 1346
1344 1347 ret = self.prepush(remote, force, revs)
1345 1348 if ret[0] is not None:
1346 1349 cg, remote_heads = ret
1347 1350 if force: remote_heads = ['force']
1348 1351 return remote.unbundle(cg, remote_heads, 'push')
1349 1352 return ret[1]
1350 1353
1351 1354 def changegroupinfo(self, nodes):
1352 1355 self.ui.note(_("%d changesets found\n") % len(nodes))
1353 1356 if self.ui.debugflag:
1354 1357 self.ui.debug(_("List of changesets:\n"))
1355 1358 for node in nodes:
1356 1359 self.ui.debug("%s\n" % hex(node))
1357 1360
1358 1361 def changegroupsubset(self, bases, heads, source):
1359 1362 """This function generates a changegroup consisting of all the nodes
1360 1363         that are descendants of any of the bases, and ancestors of any of
1361 1364 the heads.
1362 1365
1363 1366 It is fairly complex as determining which filenodes and which
1364 1367 manifest nodes need to be included for the changeset to be complete
1365 1368 is non-trivial.
1366 1369
1367 1370 Another wrinkle is doing the reverse, figuring out which changeset in
1368 1371 the changegroup a particular filenode or manifestnode belongs to."""
1369 1372
1370 1373 self.hook('preoutgoing', throw=True, source=source)
1371 1374
1372 1375 # Set up some initial variables
1373 1376 # Make it easy to refer to self.changelog
1374 1377 cl = self.changelog
1375 1378 # msng is short for missing - compute the list of changesets in this
1376 1379 # changegroup.
1377 1380 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1378 1381 self.changegroupinfo(msng_cl_lst)
1379 1382 # Some bases may turn out to be superfluous, and some heads may be
1380 1383 # too. nodesbetween will return the minimal set of bases and heads
1381 1384 # necessary to re-create the changegroup.
1382 1385
1383 1386 # Known heads are the list of heads that it is assumed the recipient
1384 1387 # of this changegroup will know about.
1385 1388 knownheads = {}
1386 1389 # We assume that all parents of bases are known heads.
1387 1390 for n in bases:
1388 1391 for p in cl.parents(n):
1389 1392 if p != nullid:
1390 1393 knownheads[p] = 1
1391 1394 knownheads = knownheads.keys()
1392 1395 if knownheads:
1393 1396 # Now that we know what heads are known, we can compute which
1394 1397 # changesets are known. The recipient must know about all
1395 1398 # changesets required to reach the known heads from the null
1396 1399 # changeset.
1397 1400 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1398 1401 junk = None
1399 1402 # Transform the list into an ersatz set.
1400 1403 has_cl_set = dict.fromkeys(has_cl_set)
1401 1404 else:
1402 1405 # If there were no known heads, the recipient cannot be assumed to
1403 1406 # know about any changesets.
1404 1407 has_cl_set = {}
1405 1408
1406 1409 # Make it easy to refer to self.manifest
1407 1410 mnfst = self.manifest
1408 1411 # We don't know which manifests are missing yet
1409 1412 msng_mnfst_set = {}
1410 1413 # Nor do we know which filenodes are missing.
1411 1414 msng_filenode_set = {}
1412 1415
1413 1416 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1414 1417 junk = None
1415 1418
1416 1419 # A changeset always belongs to itself, so the changenode lookup
1417 1420 # function for a changenode is identity.
1418 1421 def identity(x):
1419 1422 return x
1420 1423
1421 1424 # A function generating function. Sets up an environment for the
1422 1425 # inner function.
1423 1426 def cmp_by_rev_func(revlog):
1424 1427 # Compare two nodes by their revision number in the environment's
1425 1428 # revision history. Since the revision number both represents the
1426 1429 # most efficient order to read the nodes in, and represents a
1427 1430 # topological sorting of the nodes, this function is often useful.
1428 1431 def cmp_by_rev(a, b):
1429 1432 return cmp(revlog.rev(a), revlog.rev(b))
1430 1433 return cmp_by_rev
1431 1434
1432 1435 # If we determine that a particular file or manifest node must be a
1433 1436 # node that the recipient of the changegroup will already have, we can
1434 1437 # also assume the recipient will have all the parents. This function
1435 1438 # prunes them from the set of missing nodes.
1436 1439 def prune_parents(revlog, hasset, msngset):
1437 1440 haslst = hasset.keys()
1438 1441 haslst.sort(cmp_by_rev_func(revlog))
1439 1442 for node in haslst:
1440 1443 parentlst = [p for p in revlog.parents(node) if p != nullid]
1441 1444 while parentlst:
1442 1445 n = parentlst.pop()
1443 1446 if n not in hasset:
1444 1447 hasset[n] = 1
1445 1448 p = [p for p in revlog.parents(n) if p != nullid]
1446 1449 parentlst.extend(p)
1447 1450 for n in hasset:
1448 1451 msngset.pop(n, None)
1449 1452
1450 1453 # This is a function generating function used to set up an environment
1451 1454 # for the inner function to execute in.
1452 1455 def manifest_and_file_collector(changedfileset):
1453 1456 # This is an information gathering function that gathers
1454 1457 # information from each changeset node that goes out as part of
1455 1458 # the changegroup. The information gathered is a list of which
1456 1459 # manifest nodes are potentially required (the recipient may
1457 1460 # already have them) and total list of all files which were
1458 1461 # changed in any changeset in the changegroup.
1459 1462 #
1460 1463 # We also remember the first changenode we saw any manifest
1461 1464 # referenced by so we can later determine which changenode 'owns'
1462 1465 # the manifest.
1463 1466 def collect_manifests_and_files(clnode):
1464 1467 c = cl.read(clnode)
1465 1468 for f in c[3]:
1466 1469 # This is to make sure we only have one instance of each
1467 1470 # filename string for each filename.
1468 1471 changedfileset.setdefault(f, f)
1469 1472 msng_mnfst_set.setdefault(c[0], clnode)
1470 1473 return collect_manifests_and_files
1471 1474
1472 1475 # Figure out which manifest nodes (of the ones we think might be part
1473 1476 # of the changegroup) the recipient must know about and remove them
1474 1477 # from the changegroup.
1475 1478 def prune_manifests():
1476 1479 has_mnfst_set = {}
1477 1480 for n in msng_mnfst_set:
1478 1481 # If a 'missing' manifest thinks it belongs to a changenode
1479 1482 # the recipient is assumed to have, obviously the recipient
1480 1483 # must have that manifest.
1481 1484 linknode = cl.node(mnfst.linkrev(n))
1482 1485 if linknode in has_cl_set:
1483 1486 has_mnfst_set[n] = 1
1484 1487 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1485 1488
1486 1489 # Use the information collected in collect_manifests_and_files to say
1487 1490 # which changenode any manifestnode belongs to.
1488 1491 def lookup_manifest_link(mnfstnode):
1489 1492 return msng_mnfst_set[mnfstnode]
1490 1493
1491 1494 # A function generating function that sets up the initial environment
1492 1495         # for the inner function.
1493 1496 def filenode_collector(changedfiles):
1494 1497 next_rev = [0]
1495 1498 # This gathers information from each manifestnode included in the
1496 1499 # changegroup about which filenodes the manifest node references
1497 1500 # so we can include those in the changegroup too.
1498 1501 #
1499 1502 # It also remembers which changenode each filenode belongs to. It
1500 1503         # does this by assuming that a filenode belongs to the changenode
1501 1504         # that the first manifest referencing it belongs to.
1502 1505 def collect_msng_filenodes(mnfstnode):
1503 1506 r = mnfst.rev(mnfstnode)
1504 1507 if r == next_rev[0]:
1505 1508 # If the last rev we looked at was the one just previous,
1506 1509 # we only need to see a diff.
1507 1510 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1508 1511 # For each line in the delta
1509 1512 for dline in delta.splitlines():
1510 1513 # get the filename and filenode for that line
1511 1514 f, fnode = dline.split('\0')
1512 1515 fnode = bin(fnode[:40])
1513 1516 f = changedfiles.get(f, None)
1514 1517 # And if the file is in the list of files we care
1515 1518 # about.
1516 1519 if f is not None:
1517 1520 # Get the changenode this manifest belongs to
1518 1521 clnode = msng_mnfst_set[mnfstnode]
1519 1522 # Create the set of filenodes for the file if
1520 1523 # there isn't one already.
1521 1524 ndset = msng_filenode_set.setdefault(f, {})
1522 1525 # And set the filenode's changelog node to the
1523 1526 # manifest's if it hasn't been set already.
1524 1527 ndset.setdefault(fnode, clnode)
1525 1528 else:
1526 1529 # Otherwise we need a full manifest.
1527 1530 m = mnfst.read(mnfstnode)
1528 1531                     # For every file we care about.
1529 1532 for f in changedfiles:
1530 1533 fnode = m.get(f, None)
1531 1534 # If it's in the manifest
1532 1535 if fnode is not None:
1533 1536 # See comments above.
1534 1537 clnode = msng_mnfst_set[mnfstnode]
1535 1538 ndset = msng_filenode_set.setdefault(f, {})
1536 1539 ndset.setdefault(fnode, clnode)
1537 1540 # Remember the revision we hope to see next.
1538 1541 next_rev[0] = r + 1
1539 1542 return collect_msng_filenodes
1540 1543
1541 1544         # We have a list of filenodes we think we need for a file, let's remove
1542 1545         # all those we know the recipient must have.
1543 1546 def prune_filenodes(f, filerevlog):
1544 1547 msngset = msng_filenode_set[f]
1545 1548 hasset = {}
1546 1549 # If a 'missing' filenode thinks it belongs to a changenode we
1547 1550 # assume the recipient must have, then the recipient must have
1548 1551 # that filenode.
1549 1552 for n in msngset:
1550 1553 clnode = cl.node(filerevlog.linkrev(n))
1551 1554 if clnode in has_cl_set:
1552 1555 hasset[n] = 1
1553 1556 prune_parents(filerevlog, hasset, msngset)
1554 1557
1555 1558         # A function generating function that sets up a context for the
1556 1559 # inner function.
1557 1560 def lookup_filenode_link_func(fname):
1558 1561 msngset = msng_filenode_set[fname]
1559 1562 # Lookup the changenode the filenode belongs to.
1560 1563 def lookup_filenode_link(fnode):
1561 1564 return msngset[fnode]
1562 1565 return lookup_filenode_link
1563 1566
1564 1567         # Now that we have all these utility functions to help out and
1565 1568 # logically divide up the task, generate the group.
1566 1569 def gengroup():
1567 1570 # The set of changed files starts empty.
1568 1571 changedfiles = {}
1569 1572 # Create a changenode group generator that will call our functions
1570 1573 # back to lookup the owning changenode and collect information.
1571 1574 group = cl.group(msng_cl_lst, identity,
1572 1575 manifest_and_file_collector(changedfiles))
1573 1576 for chnk in group:
1574 1577 yield chnk
1575 1578
1576 1579 # The list of manifests has been collected by the generator
1577 1580 # calling our functions back.
1578 1581 prune_manifests()
1579 1582 msng_mnfst_lst = msng_mnfst_set.keys()
1580 1583 # Sort the manifestnodes by revision number.
1581 1584 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1582 1585 # Create a generator for the manifestnodes that calls our lookup
1583 1586 # and data collection functions back.
1584 1587 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1585 1588 filenode_collector(changedfiles))
1586 1589 for chnk in group:
1587 1590 yield chnk
1588 1591
1589 1592 # These are no longer needed, dereference and toss the memory for
1590 1593 # them.
1591 1594 msng_mnfst_lst = None
1592 1595 msng_mnfst_set.clear()
1593 1596
1594 1597 changedfiles = changedfiles.keys()
1595 1598 changedfiles.sort()
1596 1599 # Go through all our files in order sorted by name.
1597 1600 for fname in changedfiles:
1598 1601 filerevlog = self.file(fname)
1599 1602 # Toss out the filenodes that the recipient isn't really
1600 1603 # missing.
1601 1604 if msng_filenode_set.has_key(fname):
1602 1605 prune_filenodes(fname, filerevlog)
1603 1606 msng_filenode_lst = msng_filenode_set[fname].keys()
1604 1607 else:
1605 1608 msng_filenode_lst = []
1606 1609 # If any filenodes are left, generate the group for them,
1607 1610 # otherwise don't bother.
1608 1611 if len(msng_filenode_lst) > 0:
1609 1612 yield changegroup.genchunk(fname)
1610 1613 # Sort the filenodes by their revision number.
1611 1614 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1612 1615 # Create a group generator and only pass in a changenode
1613 1616 # lookup function as we need to collect no information
1614 1617 # from filenodes.
1615 1618 group = filerevlog.group(msng_filenode_lst,
1616 1619 lookup_filenode_link_func(fname))
1617 1620 for chnk in group:
1618 1621 yield chnk
1619 1622 if msng_filenode_set.has_key(fname):
1620 1623 # Don't need this anymore, toss it to free memory.
1621 1624 del msng_filenode_set[fname]
1622 1625 # Signal that no more groups are left.
1623 1626 yield changegroup.closechunk()
1624 1627
1625 1628 if msng_cl_lst:
1626 1629 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1627 1630
1628 1631 return util.chunkbuffer(gengroup())
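
Aside: genchunk and closechunk come from changegroup.py; as I read that module
at this vintage, a chunk is a 4-byte big-endian length prefix (which counts
itself) followed by the payload, and a zero length closes a group. A sketch of
that framing, written with Python 3 bytes:

    import struct

    def genchunk(data):
        # Length prefix includes its own four bytes.
        return struct.pack(">l", len(data) + 4) + data

    def closechunk():
        # A zero length marks the end of a group.
        return struct.pack(">l", 0)

    chunk = genchunk(b'hello')
    assert struct.unpack(">l", chunk[:4])[0] == 9
    assert closechunk() == b'\0\0\0\0'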
1629 1632
1630 1633 def changegroup(self, basenodes, source):
1631 1634 """Generate a changegroup of all nodes that we have that a recipient
1632 1635 doesn't.
1633 1636
1634 1637 This is much easier than the previous function as we can assume that
1635 1638 the recipient has any changenode we aren't sending them."""
1636 1639
1637 1640 self.hook('preoutgoing', throw=True, source=source)
1638 1641
1639 1642 cl = self.changelog
1640 1643 nodes = cl.nodesbetween(basenodes, None)[0]
1641 1644 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1642 1645 self.changegroupinfo(nodes)
1643 1646
1644 1647 def identity(x):
1645 1648 return x
1646 1649
1647 1650 def gennodelst(revlog):
1648 1651 for r in xrange(0, revlog.count()):
1649 1652 n = revlog.node(r)
1650 1653 if revlog.linkrev(n) in revset:
1651 1654 yield n
1652 1655
1653 1656 def changed_file_collector(changedfileset):
1654 1657 def collect_changed_files(clnode):
1655 1658 c = cl.read(clnode)
1656 1659 for fname in c[3]:
1657 1660 changedfileset[fname] = 1
1658 1661 return collect_changed_files
1659 1662
1660 1663 def lookuprevlink_func(revlog):
1661 1664 def lookuprevlink(n):
1662 1665 return cl.node(revlog.linkrev(n))
1663 1666 return lookuprevlink
1664 1667
1665 1668 def gengroup():
1666 1669 # construct a list of all changed files
1667 1670 changedfiles = {}
1668 1671
1669 1672 for chnk in cl.group(nodes, identity,
1670 1673 changed_file_collector(changedfiles)):
1671 1674 yield chnk
1672 1675 changedfiles = changedfiles.keys()
1673 1676 changedfiles.sort()
1674 1677
1675 1678 mnfst = self.manifest
1676 1679 nodeiter = gennodelst(mnfst)
1677 1680 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1678 1681 yield chnk
1679 1682
1680 1683 for fname in changedfiles:
1681 1684 filerevlog = self.file(fname)
1682 1685 nodeiter = gennodelst(filerevlog)
1683 1686 nodeiter = list(nodeiter)
1684 1687 if nodeiter:
1685 1688 yield changegroup.genchunk(fname)
1686 1689 lookup = lookuprevlink_func(filerevlog)
1687 1690 for chnk in filerevlog.group(nodeiter, lookup):
1688 1691 yield chnk
1689 1692
1690 1693 yield changegroup.closechunk()
1691 1694
1692 1695 if nodes:
1693 1696 self.hook('outgoing', node=hex(nodes[0]), source=source)
1694 1697
1695 1698 return util.chunkbuffer(gengroup())
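
Aside: gennodelst is the entire "what do we send" filter in this simpler path:
a revlog entry is included exactly when its linkrev lands in the changeset
revset being pushed. A standalone sketch over a fabricated (node, linkrev)
list:

    fake_revlog = [('n0', 0), ('n1', 3), ('n2', 5)]  # made-up entries
    revset = {3: None, 5: None}

    def gennodelst(entries):
        for node, linkrev in entries:
            if linkrev in revset:
                yield node

    assert list(gennodelst(fake_revlog)) == ['n1', 'n2']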
1696 1699
1697 1700 def addchangegroup(self, source, srctype, url):
1698 1701 """add changegroup to repo.
1699 1702 returns number of heads modified or added + 1."""
1700 1703
1701 1704 def csmap(x):
1702 1705 self.ui.debug(_("add changeset %s\n") % short(x))
1703 1706 return cl.count()
1704 1707
1705 1708 def revmap(x):
1706 1709 return cl.rev(x)
1707 1710
1708 1711 if not source:
1709 1712 return 0
1710 1713
1711 1714 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1712 1715
1713 1716 changesets = files = revisions = 0
1714 1717
1715 1718 tr = self.transaction()
1716 1719
1717 1720 # write changelog data to temp files so concurrent readers will not see
1718 1721 # inconsistent view
1719 1722 cl = None
1720 1723 try:
1721 1724 cl = appendfile.appendchangelog(self.sopener,
1722 1725 self.changelog.version)
1723 1726
1724 1727 oldheads = len(cl.heads())
1725 1728
1726 1729 # pull off the changeset group
1727 1730 self.ui.status(_("adding changesets\n"))
1728 1731 cor = cl.count() - 1
1729 1732 chunkiter = changegroup.chunkiter(source)
1730 1733 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1731 1734 raise util.Abort(_("received changelog group is empty"))
1732 1735 cnr = cl.count() - 1
1733 1736 changesets = cnr - cor
1734 1737
1735 1738 # pull off the manifest group
1736 1739 self.ui.status(_("adding manifests\n"))
1737 1740 chunkiter = changegroup.chunkiter(source)
1738 1741 # no need to check for empty manifest group here:
1739 1742 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1740 1743 # no new manifest will be created and the manifest group will
1741 1744 # be empty during the pull
1742 1745 self.manifest.addgroup(chunkiter, revmap, tr)
1743 1746
1744 1747 # process the files
1745 1748 self.ui.status(_("adding file changes\n"))
1746 1749 while 1:
1747 1750 f = changegroup.getchunk(source)
1748 1751 if not f:
1749 1752 break
1750 1753 self.ui.debug(_("adding %s revisions\n") % f)
1751 1754 fl = self.file(f)
1752 1755 o = fl.count()
1753 1756 chunkiter = changegroup.chunkiter(source)
1754 1757 if fl.addgroup(chunkiter, revmap, tr) is None:
1755 1758 raise util.Abort(_("received file revlog group is empty"))
1756 1759 revisions += fl.count() - o
1757 1760 files += 1
1758 1761
1759 1762 cl.writedata()
1760 1763 finally:
1761 1764 if cl:
1762 1765 cl.cleanup()
1763 1766
1764 1767 # make changelog see real files again
1765 1768 self.changelog = changelog.changelog(self.sopener,
1766 1769 self.changelog.version)
1767 1770 self.changelog.checkinlinesize(tr)
1768 1771
1769 1772 newheads = len(self.changelog.heads())
1770 1773 heads = ""
1771 1774 if oldheads and newheads != oldheads:
1772 1775 heads = _(" (%+d heads)") % (newheads - oldheads)
1773 1776
1774 1777 self.ui.status(_("added %d changesets"
1775 1778 " with %d changes to %d files%s\n")
1776 1779 % (changesets, revisions, files, heads))
1777 1780
1778 1781 if changesets > 0:
1779 1782 self.hook('pretxnchangegroup', throw=True,
1780 1783 node=hex(self.changelog.node(cor+1)), source=srctype,
1781 1784 url=url)
1782 1785
1783 1786 tr.close()
1784 1787
1785 1788 if changesets > 0:
1786 1789 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1787 1790 source=srctype, url=url)
1788 1791
1789 1792 for i in xrange(cor + 1, cnr + 1):
1790 1793 self.hook("incoming", node=hex(self.changelog.node(i)),
1791 1794 source=srctype, url=url)
1792 1795
1793 1796 return newheads - oldheads + 1
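
Aside: the while loop above consumes the tail of the stream as (filename
chunk, file revlog group) pairs until it hits an empty chunk; getchunk, as I
read changegroup.py, treats any length of four or less as empty. A standalone
parse of a fabricated stream holding one filename chunk plus the terminator:

    import io
    import struct

    def getchunk(fp):
        # Length <= 4 means "empty chunk", the loop terminator above.
        l = struct.unpack(">l", fp.read(4))[0]
        if l <= 4:
            return b''
        return fp.read(l - 4)

    stream = io.BytesIO(struct.pack(">l", 7) + b'foo' +
                        struct.pack(">l", 0))
    names = []
    while True:
        f = getchunk(stream)
        if not f:
            break
        names.append(f)
    assert names == [b'foo']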
1794 1797
1795 1798
1796 1799 def stream_in(self, remote):
1797 1800 fp = remote.stream_out()
1798 1801 l = fp.readline()
1799 1802 try:
1800 1803 resp = int(l)
1801 1804 except ValueError:
1802 1805 raise util.UnexpectedOutput(
1803 1806 _('Unexpected response from remote server:'), l)
1804 1807 if resp != 0:
1805 1808 raise util.Abort(_('operation forbidden by server'))
1806 1809 self.ui.status(_('streaming all changes\n'))
1807 1810 l = fp.readline()
1808 1811 try:
1809 1812 total_files, total_bytes = map(int, l.split(' ', 1))
1810 1813 except (ValueError, TypeError):
1811 1814 raise util.UnexpectedOutput(
1812 1815 _('Unexpected response from remote server:'), l)
1813 1816 self.ui.status(_('%d files to transfer, %s of data\n') %
1814 1817 (total_files, util.bytecount(total_bytes)))
1815 1818 start = time.time()
1816 1819 for i in xrange(total_files):
1817 1820 l = fp.readline()
1818 1821 try:
1819 1822 name, size = l.split('\0', 1)
1820 1823 size = int(size)
1821 1824 except (ValueError, TypeError):
1822 1825 raise util.UnexpectedOutput(
1823 1826 _('Unexpected response from remote server:'), l)
1824 1827 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1825 1828 ofp = self.sopener(name, 'w')
1826 1829 for chunk in util.filechunkiter(fp, limit=size):
1827 1830 ofp.write(chunk)
1828 1831 ofp.close()
1829 1832 elapsed = time.time() - start
1830 1833 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1831 1834 (util.bytecount(total_bytes), elapsed,
1832 1835 util.bytecount(total_bytes / elapsed)))
1833 1836 self.reload()
1834 1837 return len(self.heads()) + 1
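
Aside: the wire format stream_in parses is visible in the hunk above: a
numeric status line, a "<files> <bytes>" line, then for each file a header of
"name\0size" followed by exactly size raw bytes. A standalone decode of a
fabricated payload:

    import io

    payload = b'0\n' + b'1 5\n' + b'foo\x005\n' + b'hello'
    fp = io.BytesIO(payload)
    assert int(fp.readline()) == 0              # server said OK
    total_files, total_bytes = map(int, fp.readline().split(b' ', 1))
    assert total_files == 1 and total_bytes == 5
    for _ in range(total_files):
        name, size = fp.readline().split(b'\0', 1)
        data = fp.read(int(size))
    assert (name, data) == (b'foo', b'hello')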
1835 1838
1836 1839 def clone(self, remote, heads=[], stream=False):
1837 1840 '''clone remote repository.
1838 1841
1839 1842 keyword arguments:
1840 1843 heads: list of revs to clone (forces use of pull)
1841 1844 stream: use streaming clone if possible'''
1842 1845
1843 1846 # now, all clients that can request uncompressed clones can
1844 1847 # read repo formats supported by all servers that can serve
1845 1848 # them.
1846 1849
1847 1850 # if revlog format changes, client will have to check version
1848 1851 # and format flags on "stream" capability, and use
1849 1852 # uncompressed only if compatible.
1850 1853
1851 1854 if stream and not heads and remote.capable('stream'):
1852 1855 return self.stream_in(remote)
1853 1856 return self.pull(remote, heads)
1854 1857
1855 1858 # used to avoid circular references so destructors work
1856 1859 def aftertrans(base):
1857 1860 p = base
1858 1861 def a():
1859 1862 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1860 1863 util.rename(os.path.join(p, "journal.dirstate"),
1861 1864 os.path.join(p, "undo.dirstate"))
1862 1865 return a
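
Aside: the comment above states the design: aftertrans captures only the base
path string, never the repository object, so the transaction's callback cannot
form a reference cycle that would keep destructors from running. A sketch of
the same closure-over-a-string pattern, with a hypothetical path (posixpath is
used only to make the sketch deterministic across platforms):

    import posixpath

    def aftertrans(base):
        def a():
            # Only the string travels with the closure.
            return (posixpath.join(base, "journal"),
                    posixpath.join(base, "undo"))
        return a

    cb = aftertrans("/tmp/repo/.hg")  # hypothetical path
    assert cb() == ("/tmp/repo/.hg/journal", "/tmp/repo/.hg/undo")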
1863 1866
1864 1867 def instance(ui, path, create):
1865 1868 return localrepository(ui, util.drop_scheme('file', path), create)
1866 1869
1867 1870 def islocal(path):
1868 1871 return True