##// END OF EJS Templates
localrepo: change aftertrans to be independant of the store path
Benoit Boissinot -
r3790:f183c185 default
parent child Browse files
Show More
@@ -1,1912 +1,1913 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
class localrepository(repo.repository):
    # protocol capabilities this repository advertises to clients
    capabilities = ('lookup', 'changegroupsubset')

    def __del__(self):
        # drop the transaction handle on teardown so the transaction
        # object (and its journal) is not kept alive by this repo
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or with create=1, create) the repository at path.

        If path is None, walk upward from the current directory until
        a .hg directory is found.  Raises repo.RepoError when no
        repository exists (or, with create, when one already does).
        """
        repo.repository.__init__(self)
        if not path:
            # search upwards for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.realpath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener: .hg metadata; sopener: the store (same dir at this
        # layout version); wopener: the working directory
        self.opener = util.opener(self.path)
        self.sopener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repo without an hgrc is fine
            pass

        # determine revlog format version and flags from [revlog] config
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not loaded yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
    def hook(self, name, throw=False, **args):
        """Run all configured hooks for event 'name'.

        args are passed as keyword arguments to python hooks and as
        HG_* environment variables to shell hooks.  With throw=True a
        failing hook raises util.Abort; otherwise failures only warn.
        Returns the (truthy) result of the last hook run.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: a non-zero exit status means failure
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # hooks named "<name>" or "<name>.<suffix>" all fire for <name>;
        # sort so they run in a deterministic order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173 173
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit on top of a locally modified .hgtags
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
219 219
220 220 def tags(self):
221 221 '''return a mapping of tag to node'''
222 222 if not self.tagscache:
223 223 self.tagscache = {}
224 224
225 225 def parsetag(line, context):
226 226 if not line:
227 227 return
228 228 s = l.split(" ", 1)
229 229 if len(s) != 2:
230 230 self.ui.warn(_("%s: cannot parse entry\n") % context)
231 231 return
232 232 node, key = s
233 233 key = util.tolocal(key.strip()) # stored in UTF-8
234 234 try:
235 235 bin_n = bin(node)
236 236 except TypeError:
237 237 self.ui.warn(_("%s: node '%s' is not well formed\n") %
238 238 (context, node))
239 239 return
240 240 if bin_n not in self.changelog.nodemap:
241 241 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
242 242 (context, key))
243 243 return
244 244 self.tagscache[key] = bin_n
245 245
246 246 # read the tags file from each head, ending with the tip,
247 247 # and add each tag found to the map, with "newer" ones
248 248 # taking precedence
249 249 f = None
250 250 for rev, node, fnode in self._hgtagsnodes():
251 251 f = (f and f.filectx(fnode) or
252 252 self.filectx('.hgtags', fileid=fnode))
253 253 count = 0
254 254 for l in f.data().splitlines():
255 255 count += 1
256 256 parsetag(l, _("%s, line %d") % (str(f), count))
257 257
258 258 try:
259 259 f = self.opener("localtags")
260 260 count = 0
261 261 for l in f:
262 262 # localtags are stored in the local character set
263 263 # while the internal tag table is stored in UTF-8
264 264 l = util.fromlocal(l)
265 265 count += 1
266 266 parsetag(l, _("localtags, line %d") % count)
267 267 except IOError:
268 268 pass
269 269
270 270 self.tagscache['tip'] = self.changelog.tip()
271 271
272 272 return self.tagscache
273 273
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head's .hgtags filenode.

        Heads are visited in ascending-revision order (self.heads() is
        newest-first, hence the reverse).  When several heads share the
        same .hgtags filenode, only the last occurrence is kept.
        """
        heads = self.heads()
        heads.reverse()
        last = {}       # fnode -> index of its latest entry in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # drop the earlier duplicate (same .hgtags content)
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
291 291
292 292 def tagslist(self):
293 293 '''return a list of tags ordered by revision'''
294 294 l = []
295 295 for t, n in self.tags().items():
296 296 try:
297 297 r = self.changelog.rev(n)
298 298 except:
299 299 r = -2 # sort to the beginning of the list if unknown
300 300 l.append((r, t, n))
301 301 l.sort()
302 302 return [(t, n) for r, t, n in l]
303 303
304 304 def nodetags(self, node):
305 305 '''return the tags associated with a node'''
306 306 if not self.nodetagscache:
307 307 self.nodetagscache = {}
308 308 for t, n in self.tags().items():
309 309 self.nodetagscache.setdefault(n, []).append(t)
310 310 return self.nodetagscache.get(node, [])
311 311
    def branchtags(self):
        """Return a mapping of branch name -> tip-most node on it.

        Backed by an on-disk cache (branches.cache) that is updated
        incrementally with any revisions added since it was written.
        """
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is stale: scan the missing revisions and rewrite it
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
330 330
    def _readbranchcache(self):
        """Read branches.cache; return (partial, last, lrev).

        partial maps branch name -> node, last/lrev identify the tip
        the cache was valid for.  Any parse or sanity failure simply
        yields an empty cache (it will be rebuilt).
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line: "<tip-hex> <tip-rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a broken/missing cache is not fatal; start from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
354 354
355 355 def _writebranchcache(self, branches, tip, tiprev):
356 356 try:
357 357 f = self.opener("branches.cache", "w")
358 358 f.write("%s %s\n" % (hex(tip), tiprev))
359 359 for label, node in branches.iteritems():
360 360 f.write("%s %s\n" % (hex(node), label))
361 361 except IOError:
362 362 pass
363 363
364 364 def _updatebranchcache(self, partial, start, end):
365 365 for r in xrange(start, end):
366 366 c = self.changectx(r)
367 367 b = c.branch()
368 368 if b:
369 369 partial[b] = c.node()
370 370
    def lookup(self, key):
        """Resolve key ('.', hex/node, tag, branch, or prefix) to a node.

        Resolution order: exact changelog match, tag, branch name,
        unambiguous node-prefix.  Raises repo.RepoError if nothing
        matches.
        """
        if key == '.':
            # '.' means the first working directory parent
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
387 387
388 388 def dev(self):
389 389 return os.lstat(self.path).st_dev
390 390
    def local(self):
        # this is a local, on-disk repository (as opposed to http/ssh)
        return True
393 393
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
396 396
    def sjoin(self, f):
        # path of f inside the store; identical to join() at this
        # layout version, but kept separate so the store can move
        return os.path.join(self.path, f)
399 399
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
402 402
403 403 def file(self, f):
404 404 if f[0] == '/':
405 405 f = f[1:]
406 406 return filelog.filelog(self.sopener, f, self.revlogversion)
407 407
    def changectx(self, changeid=None):
        # context object for the given changeset (None -> tip, per
        # context.changectx semantics — confirm in context.py)
        return context.changectx(self, changeid)
410 410
    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
413 413
414 414 def parents(self, changeid=None):
415 415 '''
416 416 get list of changectxs for parents of changeid or working directory
417 417 '''
418 418 if changeid is None:
419 419 pl = self.dirstate.parents()
420 420 else:
421 421 n = self.changelog.lookup(changeid)
422 422 pl = self.changelog.parents(n)
423 423 if pl[1] == nullid:
424 424 return [self.changectx(pl[0])]
425 425 return [self.changectx(pl[0]), self.changectx(pl[1])]
426 426
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
431 431
    def getcwd(self):
        # current directory relative to the repository root
        return self.dirstate.getcwd()
434 434
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
437 437
438 438 def wread(self, filename):
439 439 if self.encodepats == None:
440 440 l = []
441 441 for pat, cmd in self.ui.configitems("encode"):
442 442 mf = util.matcher(self.root, "", [pat], [], [])[1]
443 443 l.append((mf, cmd))
444 444 self.encodepats = l
445 445
446 446 data = self.wopener(filename, 'r').read()
447 447
448 448 for mf, cmd in self.encodepats:
449 449 if mf(filename):
450 450 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
451 451 data = util.filter(data, cmd)
452 452 break
453 453
454 454 return data
455 455
456 456 def wwrite(self, filename, data, fd=None):
457 457 if self.decodepats == None:
458 458 l = []
459 459 for pat, cmd in self.ui.configitems("decode"):
460 460 mf = util.matcher(self.root, "", [pat], [], [])[1]
461 461 l.append((mf, cmd))
462 462 self.decodepats = l
463 463
464 464 for mf, cmd in self.decodepats:
465 465 if mf(filename):
466 466 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
467 467 data = util.filter(data, cmd)
468 468 break
469 469
470 470 if fd:
471 471 return fd.write(data)
472 472 return self.wopener(filename, 'w').write(data)
473 473
474 474 def transaction(self):
475 475 tr = self.transhandle
476 476 if tr != None and tr.running():
477 477 return tr.nest()
478 478
479 479 # save dirstate for rollback
480 480 try:
481 481 ds = self.opener("dirstate").read()
482 482 except IOError:
483 483 ds = ""
484 484 self.opener("journal.dirstate", "w").write(ds)
485 485
486 renames = [(self.sjoin("journal"), self.sjoin("undo")),
487 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
486 488 tr = transaction.transaction(self.ui.warn, self.sopener,
487 489 self.sjoin("journal"),
488 aftertrans(self.path))
490 aftertrans(renames))
489 491 self.transhandle = tr
490 492 return tr
491 493
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # in-memory state is stale after the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
502 504
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the undo files."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            # restore the dirstate saved when the transaction started
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both store and working-dir state from disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
515 517
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
518 520
519 521 def reload(self):
520 522 self.changelog.load()
521 523 self.manifest.load()
522 524 self.tagscache = None
523 525 self.nodetagscache = None
524 526
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file 'lockname'.

        First tries without waiting; if held and wait is true, retries
        with a timeout (ui.timeout config, default 600s).  acquirefn
        runs after acquisition (typically a reload).  Returns the lock.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
540 542
    def lock(self, wait=1):
        # store lock; reload in-memory revlogs once acquired
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
544 546
    def wlock(self, wait=1):
        # working-dir lock; write the dirstate on release, re-read it
        # on acquisition
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
549 551
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new file node.  Appends fn to changelist only when
        a new filelog revision is actually created; if the file is
        unmodified relative to its parent, the existing node is
        returned unchanged.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # fn was copied/renamed; record the source and its revision
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # copies get no literal file parents; ancestry flows
            # through the copy metadata
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
591 593
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        # commit with explicit parents, bypassing dirstate-driven file
        # discovery; defaults to the working directory parents
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
597 599
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit files (or all outstanding changes) as a new changeset.

        Returns the new changeset node, or None when there is nothing
        to commit or the user supplied an empty message.  With p1 set
        the dirstate is bypassed (rawcommit mode).
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        # never mutate the caller's extra dict
        extra = extra.copy()

        if use_dirstate:
            if files:
                # commit only the named files, classified by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # commit everything modified/added, remove the removed
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it already points at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a branch change alone is enough to commit
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the commit-message template and launch the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; abort on
        # an effectively empty message
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
733 735
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed revision: iterate its manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # walking the working directory: delegate to the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
767 769
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns the 7-tuple (modified, added, removed, deleted,
        unknown, ignored, clean), each a sorted list of filenames.
        """

        def fcmp(fn, mf):
            # full content comparison of working file fn against the
            # manifest revision; true when they differ
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # take the wlock opportunistically so clean-file fixups can
            # be written back; proceed read-only if it is unavailable
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # consume mf1 entries; leftovers are removals
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
864 866
865 867 def add(self, list, wlock=None):
866 868 if not wlock:
867 869 wlock = self.wlock()
868 870 for f in list:
869 871 p = self.wjoin(f)
870 872 if not os.path.exists(p):
871 873 self.ui.warn(_("%s does not exist!\n") % f)
872 874 elif not os.path.isfile(p):
873 875 self.ui.warn(_("%s not added: only files supported currently\n")
874 876 % f)
875 877 elif self.dirstate.state(f) in 'an':
876 878 self.ui.warn(_("%s already tracked!\n") % f)
877 879 else:
878 880 self.dirstate.update([f], "a")
879 881
880 882 def forget(self, list, wlock=None):
881 883 if not wlock:
882 884 wlock = self.wlock()
883 885 for f in list:
884 886 if self.dirstate.state(f) not in 'ai':
885 887 self.ui.warn(_("%s not added!\n") % f)
886 888 else:
887 889 self.dirstate.forget([f])
888 890
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink=True delete them too.

        A file that still exists in the working directory is refused
        (warned about), since removal would lose its contents.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # removing a file that was only ever added: just forget it
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
909 911
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal from the first parent.

        Rewrites each file's content (through decode filters), restores
        its exec bit, and marks it normal again in the dirstate.
        """
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")
924 926
925 927 def copy(self, source, dest, wlock=None):
926 928 p = self.wjoin(dest)
927 929 if not os.path.exists(p):
928 930 self.ui.warn(_("%s does not exist!\n") % dest)
929 931 elif not os.path.isfile(p):
930 932 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
931 933 else:
932 934 if not wlock:
933 935 wlock = self.wlock()
934 936 if self.dirstate.state(dest) == '?':
935 937 self.dirstate.update([dest], "a")
936 938 self.dirstate.copy(source, dest)
937 939
938 940 def heads(self, start=None):
939 941 heads = self.changelog.heads(start)
940 942 # sort the output in rev descending order
941 943 heads = [(-self.changelog.rev(h), h) for h in heads]
942 944 heads.sort()
943 945 return [n for (r, n) in heads]
944 946
945 947 # branchlookup returns a dict giving a list of branches for
946 948 # each head. A branch is defined as the tag of a node or
947 949 # the branch of the node's parents. If a node has multiple
948 950 # branch tags, tags are eliminated if they are visible from other
949 951 # branch tags.
950 952 #
951 953 # So, for this graph: a->b->c->d->e
952 954 # \ /
953 955 # aa -----/
954 956 # a has tag 2.6.12
955 957 # d has tag 2.6.13
956 958 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
957 959 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
958 960 # from the list.
959 961 #
960 962 # It is possible that more than one head will have the same branch tag.
961 963 # callers need to check the result for multiple heads under the same
962 964 # branch tag if that is a problem for them (ie checkout of a specific
963 965 # branch).
964 966 #
965 967 # passing in a specific branch will limit the depth of the search
966 968 # through the parents. It won't limit the branches returned in the
967 969 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the block comment above for the full definition of a
        'branch' and an example.  branch, if given, stops the per-head
        walk early once that tag is reached.
        """
        if not heads:
            heads = self.heads()
        # working queue of heads still to walk
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a pending merge branch: 'found' carries the tags
                # collected so far on the path that reached this merge
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        # 'tip' moves constantly and is not a branch name
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized transitive closure of tags visible from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1050 1052
1051 1053 def branches(self, nodes):
1052 1054 if not nodes:
1053 1055 nodes = [self.changelog.tip()]
1054 1056 b = []
1055 1057 for n in nodes:
1056 1058 t = n
1057 1059 while 1:
1058 1060 p = self.changelog.parents(n)
1059 1061 if p[1] != nullid or p[0] == nullid:
1060 1062 b.append((t, n, p[0], p[1]))
1061 1063 break
1062 1064 n = p[0]
1063 1065 return b
1064 1066
1065 1067 def between(self, pairs):
1066 1068 r = []
1067 1069
1068 1070 for top, bottom in pairs:
1069 1071 n, l, i = top, [], 0
1070 1072 f = 1
1071 1073
1072 1074 while n != bottom:
1073 1075 p = self.changelog.parents(n)[0]
1074 1076 if i == f:
1075 1077 l.append(n)
1076 1078 f = f * 2
1077 1079 n = p
1078 1080 i += 1
1079 1081
1080 1082 r.append(l)
1081 1083
1082 1084 return r
1083 1085
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)

        force=True downgrades the 'repository is unrelated' abort to a
        warning.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes already requested from the remote
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask in batches of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap is small enough: p is the first unknown node
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1224 1226
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: compute it ourselves
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start with every local node, then prune what the remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1272 1274
    def pull(self, remote, heads=None, force=False, lock=None):
        """Pull changesets missing locally from remote.

        heads: restrict the pull to these heads (requires the
        'changegroupsubset' capability on the remote side).
        lock: reuse an already-held repository lock; otherwise one is
        acquired here and released on exit.

        Returns addchangegroup's result (heads modified or added + 1),
        or 0 when no changes were found.
        """
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                # no common history: pulling everything
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # only release a lock we acquired ourselves
            if mylock:
                lock.release()
1298 1300
1299 1301 def push(self, remote, force=False, revs=None):
1300 1302 # there are two ways to push to remote repo:
1301 1303 #
1302 1304 # addchangegroup assumes local user can lock remote
1303 1305 # repo (local filesystem, old ssh servers).
1304 1306 #
1305 1307 # unbundle assumes local user cannot lock remote repo (new ssh
1306 1308 # servers, http servers).
1307 1309
1308 1310 if remote.capable('unbundle'):
1309 1311 return self.push_unbundle(remote, force, revs)
1310 1312 return self.push_addchangegroup(remote, force, revs)
1311 1313
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty, nothing to preserve
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # known remote head not covered by what we push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1367 1369
1368 1370 def push_addchangegroup(self, remote, force, revs):
1369 1371 lock = remote.lock()
1370 1372
1371 1373 ret = self.prepush(remote, force, revs)
1372 1374 if ret[0] is not None:
1373 1375 cg, remote_heads = ret
1374 1376 return remote.addchangegroup(cg, 'push', self.url())
1375 1377 return ret[1]
1376 1378
1377 1379 def push_unbundle(self, remote, force, revs):
1378 1380 # local repo finds heads on server, finds out what revs it
1379 1381 # must push. once revs transferred, if server finds it has
1380 1382 # different heads (someone else won commit/push race), server
1381 1383 # aborts.
1382 1384
1383 1385 ret = self.prepush(remote, force, revs)
1384 1386 if ret[0] is not None:
1385 1387 cg, remote_heads = ret
1386 1388 if force: remote_heads = ['force']
1387 1389 return remote.unbundle(cg, remote_heads, 'push')
1388 1390 return ret[1]
1389 1391
1390 1392 def changegroupinfo(self, nodes):
1391 1393 self.ui.note(_("%d changesets found\n") % len(nodes))
1392 1394 if self.ui.debugflag:
1393 1395 self.ui.debug(_("List of changesets:\n"))
1394 1396 for node in nodes:
1395 1397 self.ui.debug("%s\n" % hex(node))
1396 1398
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        source is passed to the 'preoutgoing'/'outgoing' hooks.  Returns
        a util.chunkbuffer wrapping the lazy group generator."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1668 1670
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        source is passed to the 'preoutgoing'/'outgoing' hooks.  Returns
        a util.chunkbuffer wrapping the lazy group generator."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revs going out, for fast membership
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset 'owns' itself, so its lookup is identity
        def identity(x):
            return x

        # yield the nodes of a revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # callback: record every file touched by an outgoing changeset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node to the changelog node it belongs to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups follow
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1735 1737
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source: a stream of changegroup chunks.
        srctype/url: passed through to the hooks fired around the add.
        """

        # callback used while adding changesets: reports progress and
        # returns the link revision for each new node
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk marks the end of the file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # always dispose of the appendfile, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, heads))

        if changesets > 0:
            # fired before the transaction commits: a hook failure here
            # rolls the whole changegroup back
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1833 1835
1834 1836
1835 1837 def stream_in(self, remote):
1836 1838 fp = remote.stream_out()
1837 1839 l = fp.readline()
1838 1840 try:
1839 1841 resp = int(l)
1840 1842 except ValueError:
1841 1843 raise util.UnexpectedOutput(
1842 1844 _('Unexpected response from remote server:'), l)
1843 1845 if resp == 1:
1844 1846 raise util.Abort(_('operation forbidden by server'))
1845 1847 elif resp == 2:
1846 1848 raise util.Abort(_('locking the remote repository failed'))
1847 1849 elif resp != 0:
1848 1850 raise util.Abort(_('the server sent an unknown error code'))
1849 1851 self.ui.status(_('streaming all changes\n'))
1850 1852 l = fp.readline()
1851 1853 try:
1852 1854 total_files, total_bytes = map(int, l.split(' ', 1))
1853 1855 except ValueError, TypeError:
1854 1856 raise util.UnexpectedOutput(
1855 1857 _('Unexpected response from remote server:'), l)
1856 1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1857 1859 (total_files, util.bytecount(total_bytes)))
1858 1860 start = time.time()
1859 1861 for i in xrange(total_files):
1860 1862 # XXX doesn't support '\n' or '\r' in filenames
1861 1863 l = fp.readline()
1862 1864 try:
1863 1865 name, size = l.split('\0', 1)
1864 1866 size = int(size)
1865 1867 except ValueError, TypeError:
1866 1868 raise util.UnexpectedOutput(
1867 1869 _('Unexpected response from remote server:'), l)
1868 1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1869 1871 ofp = self.sopener(name, 'w')
1870 1872 for chunk in util.filechunkiter(fp, limit=size):
1871 1873 ofp.write(chunk)
1872 1874 ofp.close()
1873 1875 elapsed = time.time() - start
1874 1876 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1875 1877 (util.bytecount(total_bytes), elapsed,
1876 1878 util.bytecount(total_bytes / elapsed)))
1877 1879 self.reload()
1878 1880 return len(self.heads()) + 1
1879 1881
1880 1882 def clone(self, remote, heads=[], stream=False):
1881 1883 '''clone remote repository.
1882 1884
1883 1885 keyword arguments:
1884 1886 heads: list of revs to clone (forces use of pull)
1885 1887 stream: use streaming clone if possible'''
1886 1888
1887 1889 # now, all clients that can request uncompressed clones can
1888 1890 # read repo formats supported by all servers that can serve
1889 1891 # them.
1890 1892
1891 1893 # if revlog format changes, client will have to check version
1892 1894 # and format flags on "stream" capability, and use
1893 1895 # uncompressed only if compatible.
1894 1896
1895 1897 if stream and not heads and remote.capable('stream'):
1896 1898 return self.stream_in(remote)
1897 1899 return self.pull(remote, heads)
1898 1900
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames listed in `files`.

    `files` is an iterable of (src, dest) pairs; they are materialized as
    tuples immediately so the callback holds no reference back to the
    repository object that built the list.
    """
    pending = [tuple(pair) for pair in files]
    def do_renames():
        for source, target in pending:
            util.rename(source, target)
    return do_renames
1907 1908
def instance(ui, path, create):
    """Build a localrepository for `path`, stripping any 'file:' scheme."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1910 1911
def islocal(path):
    """Report whether `path` names a local repository: always true here."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now