##// END OF EJS Templates
move code around
Benoit Boissinot -
r3850:a4457828 default
parent child Browse files
Show More
@@ -1,1927 +1,1929 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33
33 34 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
35 self.root = os.path.realpath(path)
36 self.origroot = path
37 self.opener = util.opener(self.path)
38 self.wopener = util.opener(self.root)
35 39
36 40 if not os.path.isdir(self.path):
37 41 if create:
38 42 if not os.path.exists(path):
39 43 os.mkdir(path)
40 44 os.mkdir(self.path)
41 if self.spath != self.path:
42 os.mkdir(self.spath)
45 #if self.spath != self.path:
46 # os.mkdir(self.spath)
43 47 else:
44 48 raise repo.RepoError(_("repository %s not found") % path)
45 49 elif create:
46 50 raise repo.RepoError(_("repository %s already exists") % path)
47 51
48 self.root = os.path.realpath(path)
49 self.origroot = path
52 # setup store
53 self.spath = self.path
54 self.sopener = util.opener(self.spath)
55
50 56 self.ui = ui.ui(parentui=parentui)
51 self.opener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
53 self.wopener = util.opener(self.root)
54
55 57 try:
56 58 self.ui.readconfig(self.join("hgrc"), self.root)
57 59 except IOError:
58 60 pass
59 61
60 62 v = self.ui.configrevlog()
61 63 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
62 64 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
63 65 fl = v.get('flags', None)
64 66 flags = 0
65 67 if fl != None:
66 68 for x in fl.split():
67 69 flags |= revlog.flagstr(x)
68 70 elif self.revlogv1:
69 71 flags = revlog.REVLOG_DEFAULT_FLAGS
70 72
71 73 v = self.revlogversion | flags
72 74 self.manifest = manifest.manifest(self.sopener, v)
73 75 self.changelog = changelog.changelog(self.sopener, v)
74 76
75 77 # the changelog might not have the inline index flag
76 78 # on. If the format of the changelog is the same as found in
77 79 # .hgrc, apply any flags found in the .hgrc as well.
78 80 # Otherwise, just version from the changelog
79 81 v = self.changelog.version
80 82 if v == self.revlogversion:
81 83 v |= flags
82 84 self.revlogversion = v
83 85
84 86 self.tagscache = None
85 87 self.branchcache = None
86 88 self.nodetagscache = None
87 89 self.encodepats = None
88 90 self.decodepats = None
89 91 self.transhandle = None
90 92
91 93 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
92 94
93 95 def url(self):
94 96 return 'file:' + self.root
95 97
96 98 def hook(self, name, throw=False, **args):
97 99 def callhook(hname, funcname):
98 100 '''call python hook. hook is callable object, looked up as
99 101 name in python module. if callable returns "true", hook
100 102 fails, else passes. if hook raises exception, treated as
101 103 hook failure. exception propagates if throw is "true".
102 104
103 105 reason for "true" meaning "hook failed" is so that
104 106 unmodified commands (e.g. mercurial.commands.update) can
105 107 be run as hooks without wrappers to convert return values.'''
106 108
107 109 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
108 110 d = funcname.rfind('.')
109 111 if d == -1:
110 112 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
111 113 % (hname, funcname))
112 114 modname = funcname[:d]
113 115 try:
114 116 obj = __import__(modname)
115 117 except ImportError:
116 118 try:
117 119 # extensions are loaded with hgext_ prefix
118 120 obj = __import__("hgext_%s" % modname)
119 121 except ImportError:
120 122 raise util.Abort(_('%s hook is invalid '
121 123 '(import of "%s" failed)') %
122 124 (hname, modname))
123 125 try:
124 126 for p in funcname.split('.')[1:]:
125 127 obj = getattr(obj, p)
126 128 except AttributeError, err:
127 129 raise util.Abort(_('%s hook is invalid '
128 130 '("%s" is not defined)') %
129 131 (hname, funcname))
130 132 if not callable(obj):
131 133 raise util.Abort(_('%s hook is invalid '
132 134 '("%s" is not callable)') %
133 135 (hname, funcname))
134 136 try:
135 137 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
136 138 except (KeyboardInterrupt, util.SignalInterrupt):
137 139 raise
138 140 except Exception, exc:
139 141 if isinstance(exc, util.Abort):
140 142 self.ui.warn(_('error: %s hook failed: %s\n') %
141 143 (hname, exc.args[0]))
142 144 else:
143 145 self.ui.warn(_('error: %s hook raised an exception: '
144 146 '%s\n') % (hname, exc))
145 147 if throw:
146 148 raise
147 149 self.ui.print_exc()
148 150 return True
149 151 if r:
150 152 if throw:
151 153 raise util.Abort(_('%s hook failed') % hname)
152 154 self.ui.warn(_('warning: %s hook failed\n') % hname)
153 155 return r
154 156
155 157 def runhook(name, cmd):
156 158 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
157 159 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
158 160 r = util.system(cmd, environ=env, cwd=self.root)
159 161 if r:
160 162 desc, r = util.explain_exit(r)
161 163 if throw:
162 164 raise util.Abort(_('%s hook %s') % (name, desc))
163 165 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
164 166 return r
165 167
166 168 r = False
167 169 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
168 170 if hname.split(".", 1)[0] == name and cmd]
169 171 hooks.sort()
170 172 for hname, cmd in hooks:
171 173 if cmd.startswith('python:'):
172 174 r = callhook(hname, cmd[7:].strip()) or r
173 175 else:
174 176 r = runhook(hname, cmd) or r
175 177 return r
176 178
177 179 tag_disallowed = ':\r\n'
178 180
179 181 def tag(self, name, node, message, local, user, date):
180 182 '''tag a revision with a symbolic name.
181 183
182 184 if local is True, the tag is stored in a per-repository file.
183 185 otherwise, it is stored in the .hgtags file, and a new
184 186 changeset is committed with the change.
185 187
186 188 keyword arguments:
187 189
188 190 local: whether to store tag in non-version-controlled file
189 191 (default False)
190 192
191 193 message: commit message to use if committing
192 194
193 195 user: name of user to use if committing
194 196
195 197 date: date tuple to use if committing'''
196 198
197 199 for c in self.tag_disallowed:
198 200 if c in name:
199 201 raise util.Abort(_('%r cannot be used in a tag name') % c)
200 202
201 203 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
202 204
203 205 if local:
204 206 # local tags are stored in the current charset
205 207 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
206 208 self.hook('tag', node=hex(node), tag=name, local=local)
207 209 return
208 210
209 211 for x in self.status()[:5]:
210 212 if '.hgtags' in x:
211 213 raise util.Abort(_('working copy of .hgtags is changed '
212 214 '(please commit .hgtags manually)'))
213 215
214 216 # committed tags are stored in UTF-8
215 217 line = '%s %s\n' % (hex(node), util.fromlocal(name))
216 218 self.wfile('.hgtags', 'ab').write(line)
217 219 if self.dirstate.state('.hgtags') == '?':
218 220 self.add(['.hgtags'])
219 221
220 222 self.commit(['.hgtags'], message, user, date)
221 223 self.hook('tag', node=hex(node), tag=name, local=local)
222 224
223 225 def tags(self):
224 226 '''return a mapping of tag to node'''
225 227 if not self.tagscache:
226 228 self.tagscache = {}
227 229
228 230 def parsetag(line, context):
229 231 if not line:
230 232 return
231 233 s = l.split(" ", 1)
232 234 if len(s) != 2:
233 235 self.ui.warn(_("%s: cannot parse entry\n") % context)
234 236 return
235 237 node, key = s
236 238 key = util.tolocal(key.strip()) # stored in UTF-8
237 239 try:
238 240 bin_n = bin(node)
239 241 except TypeError:
240 242 self.ui.warn(_("%s: node '%s' is not well formed\n") %
241 243 (context, node))
242 244 return
243 245 if bin_n not in self.changelog.nodemap:
244 246 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
245 247 (context, key))
246 248 return
247 249 self.tagscache[key] = bin_n
248 250
249 251 # read the tags file from each head, ending with the tip,
250 252 # and add each tag found to the map, with "newer" ones
251 253 # taking precedence
252 254 f = None
253 255 for rev, node, fnode in self._hgtagsnodes():
254 256 f = (f and f.filectx(fnode) or
255 257 self.filectx('.hgtags', fileid=fnode))
256 258 count = 0
257 259 for l in f.data().splitlines():
258 260 count += 1
259 261 parsetag(l, _("%s, line %d") % (str(f), count))
260 262
261 263 try:
262 264 f = self.opener("localtags")
263 265 count = 0
264 266 for l in f:
265 267 # localtags are stored in the local character set
266 268 # while the internal tag table is stored in UTF-8
267 269 l = util.fromlocal(l)
268 270 count += 1
269 271 parsetag(l, _("localtags, line %d") % count)
270 272 except IOError:
271 273 pass
272 274
273 275 self.tagscache['tip'] = self.changelog.tip()
274 276
275 277 return self.tagscache
276 278
277 279 def _hgtagsnodes(self):
278 280 heads = self.heads()
279 281 heads.reverse()
280 282 last = {}
281 283 ret = []
282 284 for node in heads:
283 285 c = self.changectx(node)
284 286 rev = c.rev()
285 287 try:
286 288 fnode = c.filenode('.hgtags')
287 289 except repo.LookupError:
288 290 continue
289 291 ret.append((rev, node, fnode))
290 292 if fnode in last:
291 293 ret[last[fnode]] = None
292 294 last[fnode] = len(ret) - 1
293 295 return [item for item in ret if item]
294 296
295 297 def tagslist(self):
296 298 '''return a list of tags ordered by revision'''
297 299 l = []
298 300 for t, n in self.tags().items():
299 301 try:
300 302 r = self.changelog.rev(n)
301 303 except:
302 304 r = -2 # sort to the beginning of the list if unknown
303 305 l.append((r, t, n))
304 306 l.sort()
305 307 return [(t, n) for r, t, n in l]
306 308
307 309 def nodetags(self, node):
308 310 '''return the tags associated with a node'''
309 311 if not self.nodetagscache:
310 312 self.nodetagscache = {}
311 313 for t, n in self.tags().items():
312 314 self.nodetagscache.setdefault(n, []).append(t)
313 315 return self.nodetagscache.get(node, [])
314 316
315 317 def branchtags(self):
316 318 if self.branchcache != None:
317 319 return self.branchcache
318 320
319 321 self.branchcache = {} # avoid recursion in changectx
320 322
321 323 partial, last, lrev = self._readbranchcache()
322 324
323 325 tiprev = self.changelog.count() - 1
324 326 if lrev != tiprev:
325 327 self._updatebranchcache(partial, lrev+1, tiprev+1)
326 328 self._writebranchcache(partial, self.changelog.tip(), tiprev)
327 329
328 330 # the branch cache is stored on disk as UTF-8, but in the local
329 331 # charset internally
330 332 for k, v in partial.items():
331 333 self.branchcache[util.tolocal(k)] = v
332 334 return self.branchcache
333 335
334 336 def _readbranchcache(self):
335 337 partial = {}
336 338 try:
337 339 f = self.opener("branches.cache")
338 340 lines = f.read().split('\n')
339 341 f.close()
340 342 last, lrev = lines.pop(0).rstrip().split(" ", 1)
341 343 last, lrev = bin(last), int(lrev)
342 344 if not (lrev < self.changelog.count() and
343 345 self.changelog.node(lrev) == last): # sanity check
344 346 # invalidate the cache
345 347 raise ValueError('Invalid branch cache: unknown tip')
346 348 for l in lines:
347 349 if not l: continue
348 350 node, label = l.rstrip().split(" ", 1)
349 351 partial[label] = bin(node)
350 352 except (KeyboardInterrupt, util.SignalInterrupt):
351 353 raise
352 354 except Exception, inst:
353 355 if self.ui.debugflag:
354 356 self.ui.warn(str(inst), '\n')
355 357 partial, last, lrev = {}, nullid, nullrev
356 358 return partial, last, lrev
357 359
358 360 def _writebranchcache(self, branches, tip, tiprev):
359 361 try:
360 362 f = self.opener("branches.cache", "w")
361 363 f.write("%s %s\n" % (hex(tip), tiprev))
362 364 for label, node in branches.iteritems():
363 365 f.write("%s %s\n" % (hex(node), label))
364 366 except IOError:
365 367 pass
366 368
367 369 def _updatebranchcache(self, partial, start, end):
368 370 for r in xrange(start, end):
369 371 c = self.changectx(r)
370 372 b = c.branch()
371 373 if b:
372 374 partial[b] = c.node()
373 375
374 376 def lookup(self, key):
375 377 if key == '.':
376 378 key = self.dirstate.parents()[0]
377 379 if key == nullid:
378 380 raise repo.RepoError(_("no revision checked out"))
379 381 elif key == 'null':
380 382 return nullid
381 383 n = self.changelog._match(key)
382 384 if n:
383 385 return n
384 386 if key in self.tags():
385 387 return self.tags()[key]
386 388 if key in self.branchtags():
387 389 return self.branchtags()[key]
388 390 n = self.changelog._partialmatch(key)
389 391 if n:
390 392 return n
391 393 raise repo.RepoError(_("unknown revision '%s'") % key)
392 394
393 395 def dev(self):
394 396 return os.lstat(self.path).st_dev
395 397
396 398 def local(self):
397 399 return True
398 400
399 401 def join(self, f):
400 402 return os.path.join(self.path, f)
401 403
402 404 def sjoin(self, f):
403 405 return os.path.join(self.spath, f)
404 406
405 407 def wjoin(self, f):
406 408 return os.path.join(self.root, f)
407 409
408 410 def file(self, f):
409 411 if f[0] == '/':
410 412 f = f[1:]
411 413 return filelog.filelog(self.sopener, f, self.revlogversion)
412 414
413 415 def changectx(self, changeid=None):
414 416 return context.changectx(self, changeid)
415 417
416 418 def workingctx(self):
417 419 return context.workingctx(self)
418 420
419 421 def parents(self, changeid=None):
420 422 '''
421 423 get list of changectxs for parents of changeid or working directory
422 424 '''
423 425 if changeid is None:
424 426 pl = self.dirstate.parents()
425 427 else:
426 428 n = self.changelog.lookup(changeid)
427 429 pl = self.changelog.parents(n)
428 430 if pl[1] == nullid:
429 431 return [self.changectx(pl[0])]
430 432 return [self.changectx(pl[0]), self.changectx(pl[1])]
431 433
432 434 def filectx(self, path, changeid=None, fileid=None):
433 435 """changeid can be a changeset revision, node, or tag.
434 436 fileid can be a file revision or node."""
435 437 return context.filectx(self, path, changeid, fileid)
436 438
437 439 def getcwd(self):
438 440 return self.dirstate.getcwd()
439 441
440 442 def wfile(self, f, mode='r'):
441 443 return self.wopener(f, mode)
442 444
443 445 def wread(self, filename):
444 446 if self.encodepats == None:
445 447 l = []
446 448 for pat, cmd in self.ui.configitems("encode"):
447 449 mf = util.matcher(self.root, "", [pat], [], [])[1]
448 450 l.append((mf, cmd))
449 451 self.encodepats = l
450 452
451 453 data = self.wopener(filename, 'r').read()
452 454
453 455 for mf, cmd in self.encodepats:
454 456 if mf(filename):
455 457 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
456 458 data = util.filter(data, cmd)
457 459 break
458 460
459 461 return data
460 462
461 463 def wwrite(self, filename, data, fd=None):
462 464 if self.decodepats == None:
463 465 l = []
464 466 for pat, cmd in self.ui.configitems("decode"):
465 467 mf = util.matcher(self.root, "", [pat], [], [])[1]
466 468 l.append((mf, cmd))
467 469 self.decodepats = l
468 470
469 471 for mf, cmd in self.decodepats:
470 472 if mf(filename):
471 473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
472 474 data = util.filter(data, cmd)
473 475 break
474 476
475 477 if fd:
476 478 return fd.write(data)
477 479 return self.wopener(filename, 'w').write(data)
478 480
479 481 def transaction(self):
480 482 tr = self.transhandle
481 483 if tr != None and tr.running():
482 484 return tr.nest()
483 485
484 486 # save dirstate for rollback
485 487 try:
486 488 ds = self.opener("dirstate").read()
487 489 except IOError:
488 490 ds = ""
489 491 self.opener("journal.dirstate", "w").write(ds)
490 492
491 493 renames = [(self.sjoin("journal"), self.sjoin("undo")),
492 494 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
493 495 tr = transaction.transaction(self.ui.warn, self.sopener,
494 496 self.sjoin("journal"),
495 497 aftertrans(renames))
496 498 self.transhandle = tr
497 499 return tr
498 500
499 501 def recover(self):
500 502 l = self.lock()
501 503 if os.path.exists(self.sjoin("journal")):
502 504 self.ui.status(_("rolling back interrupted transaction\n"))
503 505 transaction.rollback(self.sopener, self.sjoin("journal"))
504 506 self.reload()
505 507 return True
506 508 else:
507 509 self.ui.warn(_("no interrupted transaction available\n"))
508 510 return False
509 511
510 512 def rollback(self, wlock=None):
511 513 if not wlock:
512 514 wlock = self.wlock()
513 515 l = self.lock()
514 516 if os.path.exists(self.sjoin("undo")):
515 517 self.ui.status(_("rolling back last transaction\n"))
516 518 transaction.rollback(self.sopener, self.sjoin("undo"))
517 519 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
518 520 self.reload()
519 521 self.wreload()
520 522 else:
521 523 self.ui.warn(_("no rollback information available\n"))
522 524
523 525 def wreload(self):
524 526 self.dirstate.read()
525 527
526 528 def reload(self):
527 529 self.changelog.load()
528 530 self.manifest.load()
529 531 self.tagscache = None
530 532 self.nodetagscache = None
531 533
532 534 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
533 535 desc=None):
534 536 try:
535 537 l = lock.lock(lockname, 0, releasefn, desc=desc)
536 538 except lock.LockHeld, inst:
537 539 if not wait:
538 540 raise
539 541 self.ui.warn(_("waiting for lock on %s held by %r\n") %
540 542 (desc, inst.locker))
541 543 # default to 600 seconds timeout
542 544 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
543 545 releasefn, desc=desc)
544 546 if acquirefn:
545 547 acquirefn()
546 548 return l
547 549
548 550 def lock(self, wait=1):
549 551 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
550 552 desc=_('repository %s') % self.origroot)
551 553
552 554 def wlock(self, wait=1):
553 555 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
554 556 self.wreload,
555 557 desc=_('working directory of %s') % self.origroot)
556 558
557 559 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
558 560 """
559 561 commit an individual file as part of a larger transaction
560 562 """
561 563
562 564 t = self.wread(fn)
563 565 fl = self.file(fn)
564 566 fp1 = manifest1.get(fn, nullid)
565 567 fp2 = manifest2.get(fn, nullid)
566 568
567 569 meta = {}
568 570 cp = self.dirstate.copied(fn)
569 571 if cp:
570 572 meta["copy"] = cp
571 573 if not manifest2: # not a branch merge
572 574 meta["copyrev"] = hex(manifest1.get(cp, nullid))
573 575 fp2 = nullid
574 576 elif fp2 != nullid: # copied on remote side
575 577 meta["copyrev"] = hex(manifest1.get(cp, nullid))
576 578 elif fp1 != nullid: # copied on local side, reversed
577 579 meta["copyrev"] = hex(manifest2.get(cp))
578 580 fp2 = nullid
579 581 else: # directory rename
580 582 meta["copyrev"] = hex(manifest1.get(cp, nullid))
581 583 self.ui.debug(_(" %s: copy %s:%s\n") %
582 584 (fn, cp, meta["copyrev"]))
583 585 fp1 = nullid
584 586 elif fp2 != nullid:
585 587 # is one parent an ancestor of the other?
586 588 fpa = fl.ancestor(fp1, fp2)
587 589 if fpa == fp1:
588 590 fp1, fp2 = fp2, nullid
589 591 elif fpa == fp2:
590 592 fp2 = nullid
591 593
592 594 # is the file unmodified from the parent? report existing entry
593 595 if fp2 == nullid and not fl.cmp(fp1, t):
594 596 return fp1
595 597
596 598 changelist.append(fn)
597 599 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
598 600
599 601 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
600 602 if p1 is None:
601 603 p1, p2 = self.dirstate.parents()
602 604 return self.commit(files=files, text=text, user=user, date=date,
603 605 p1=p1, p2=p2, wlock=wlock)
604 606
605 607 def commit(self, files=None, text="", user=None, date=None,
606 608 match=util.always, force=False, lock=None, wlock=None,
607 609 force_editor=False, p1=None, p2=None, extra={}):
608 610
609 611 commit = []
610 612 remove = []
611 613 changed = []
612 614 use_dirstate = (p1 is None) # not rawcommit
613 615 extra = extra.copy()
614 616
615 617 if use_dirstate:
616 618 if files:
617 619 for f in files:
618 620 s = self.dirstate.state(f)
619 621 if s in 'nmai':
620 622 commit.append(f)
621 623 elif s == 'r':
622 624 remove.append(f)
623 625 else:
624 626 self.ui.warn(_("%s not tracked!\n") % f)
625 627 else:
626 628 changes = self.status(match=match)[:5]
627 629 modified, added, removed, deleted, unknown = changes
628 630 commit = modified + added
629 631 remove = removed
630 632 else:
631 633 commit = files
632 634
633 635 if use_dirstate:
634 636 p1, p2 = self.dirstate.parents()
635 637 update_dirstate = True
636 638 else:
637 639 p1, p2 = p1, p2 or nullid
638 640 update_dirstate = (self.dirstate.parents()[0] == p1)
639 641
640 642 c1 = self.changelog.read(p1)
641 643 c2 = self.changelog.read(p2)
642 644 m1 = self.manifest.read(c1[0]).copy()
643 645 m2 = self.manifest.read(c2[0])
644 646
645 647 if use_dirstate:
646 648 branchname = util.fromlocal(self.workingctx().branch())
647 649 else:
648 650 branchname = ""
649 651
650 652 if use_dirstate:
651 653 oldname = c1[5].get("branch", "") # stored in UTF-8
652 654 if not commit and not remove and not force and p2 == nullid and \
653 655 branchname == oldname:
654 656 self.ui.status(_("nothing changed\n"))
655 657 return None
656 658
657 659 xp1 = hex(p1)
658 660 if p2 == nullid: xp2 = ''
659 661 else: xp2 = hex(p2)
660 662
661 663 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
662 664
663 665 if not wlock:
664 666 wlock = self.wlock()
665 667 if not lock:
666 668 lock = self.lock()
667 669 tr = self.transaction()
668 670
669 671 # check in files
670 672 new = {}
671 673 linkrev = self.changelog.count()
672 674 commit.sort()
673 675 for f in commit:
674 676 self.ui.note(f + "\n")
675 677 try:
676 678 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
677 679 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
678 680 except IOError:
679 681 if use_dirstate:
680 682 self.ui.warn(_("trouble committing %s!\n") % f)
681 683 raise
682 684 else:
683 685 remove.append(f)
684 686
685 687 # update manifest
686 688 m1.update(new)
687 689 remove.sort()
688 690
689 691 for f in remove:
690 692 if f in m1:
691 693 del m1[f]
692 694 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
693 695
694 696 # add changeset
695 697 new = new.keys()
696 698 new.sort()
697 699
698 700 user = user or self.ui.username()
699 701 if not text or force_editor:
700 702 edittext = []
701 703 if text:
702 704 edittext.append(text)
703 705 edittext.append("")
704 706 edittext.append("HG: user: %s" % user)
705 707 if p2 != nullid:
706 708 edittext.append("HG: branch merge")
707 709 edittext.extend(["HG: changed %s" % f for f in changed])
708 710 edittext.extend(["HG: removed %s" % f for f in remove])
709 711 if not changed and not remove:
710 712 edittext.append("HG: no files changed")
711 713 edittext.append("")
712 714 # run editor in the repository root
713 715 olddir = os.getcwd()
714 716 os.chdir(self.root)
715 717 text = self.ui.edit("\n".join(edittext), user)
716 718 os.chdir(olddir)
717 719
718 720 lines = [line.rstrip() for line in text.rstrip().splitlines()]
719 721 while lines and not lines[0]:
720 722 del lines[0]
721 723 if not lines:
722 724 return None
723 725 text = '\n'.join(lines)
724 726 if branchname:
725 727 extra["branch"] = branchname
726 728 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
727 729 user, date, extra)
728 730 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
729 731 parent2=xp2)
730 732 tr.close()
731 733
732 734 if use_dirstate or update_dirstate:
733 735 self.dirstate.setparents(n)
734 736 if use_dirstate:
735 737 self.dirstate.update(new, "n")
736 738 self.dirstate.forget(remove)
737 739
738 740 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
739 741 return n
740 742
741 743 def walk(self, node=None, files=[], match=util.always, badmatch=None):
742 744 '''
743 745 walk recursively through the directory tree or a given
744 746 changeset, finding all files matched by the match
745 747 function
746 748
747 749 results are yielded in a tuple (src, filename), where src
748 750 is one of:
749 751 'f' the file was found in the directory tree
750 752 'm' the file was only in the dirstate and not in the tree
751 753 'b' file was not found and matched badmatch
752 754 '''
753 755
754 756 if node:
755 757 fdict = dict.fromkeys(files)
756 758 for fn in self.manifest.read(self.changelog.read(node)[0]):
757 759 for ffn in fdict:
758 760 # match if the file is the exact name or a directory
759 761 if ffn == fn or fn.startswith("%s/" % ffn):
760 762 del fdict[ffn]
761 763 break
762 764 if match(fn):
763 765 yield 'm', fn
764 766 for fn in fdict:
765 767 if badmatch and badmatch(fn):
766 768 if match(fn):
767 769 yield 'b', fn
768 770 else:
769 771 self.ui.warn(_('%s: No such file in rev %s\n') % (
770 772 util.pathto(self.getcwd(), fn), short(node)))
771 773 else:
772 774 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
773 775 yield src, fn
774 776
775 777 def status(self, node1=None, node2=None, files=[], match=util.always,
776 778 wlock=None, list_ignored=False, list_clean=False):
777 779 """return status of files between two nodes or node and working directory
778 780
779 781 If node1 is None, use the first dirstate parent instead.
780 782 If node2 is None, compare node1 with working directory.
781 783 """
782 784
783 785 def fcmp(fn, mf):
784 786 t1 = self.wread(fn)
785 787 return self.file(fn).cmp(mf.get(fn, nullid), t1)
786 788
787 789 def mfmatches(node):
788 790 change = self.changelog.read(node)
789 791 mf = self.manifest.read(change[0]).copy()
790 792 for fn in mf.keys():
791 793 if not match(fn):
792 794 del mf[fn]
793 795 return mf
794 796
795 797 modified, added, removed, deleted, unknown = [], [], [], [], []
796 798 ignored, clean = [], []
797 799
798 800 compareworking = False
799 801 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
800 802 compareworking = True
801 803
802 804 if not compareworking:
803 805 # read the manifest from node1 before the manifest from node2,
804 806 # so that we'll hit the manifest cache if we're going through
805 807 # all the revisions in parent->child order.
806 808 mf1 = mfmatches(node1)
807 809
808 810 # are we comparing the working directory?
809 811 if not node2:
810 812 if not wlock:
811 813 try:
812 814 wlock = self.wlock(wait=0)
813 815 except lock.LockException:
814 816 wlock = None
815 817 (lookup, modified, added, removed, deleted, unknown,
816 818 ignored, clean) = self.dirstate.status(files, match,
817 819 list_ignored, list_clean)
818 820
819 821 # are we comparing working dir against its parent?
820 822 if compareworking:
821 823 if lookup:
822 824 # do a full compare of any files that might have changed
823 825 mf2 = mfmatches(self.dirstate.parents()[0])
824 826 for f in lookup:
825 827 if fcmp(f, mf2):
826 828 modified.append(f)
827 829 else:
828 830 clean.append(f)
829 831 if wlock is not None:
830 832 self.dirstate.update([f], "n")
831 833 else:
832 834 # we are comparing working dir against non-parent
833 835 # generate a pseudo-manifest for the working dir
834 836 # XXX: create it in dirstate.py ?
835 837 mf2 = mfmatches(self.dirstate.parents()[0])
836 838 for f in lookup + modified + added:
837 839 mf2[f] = ""
838 840 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
839 841 for f in removed:
840 842 if f in mf2:
841 843 del mf2[f]
842 844 else:
843 845 # we are comparing two revisions
844 846 mf2 = mfmatches(node2)
845 847
846 848 if not compareworking:
847 849 # flush lists from dirstate before comparing manifests
848 850 modified, added, clean = [], [], []
849 851
850 852 # make sure to sort the files so we talk to the disk in a
851 853 # reasonable order
852 854 mf2keys = mf2.keys()
853 855 mf2keys.sort()
854 856 for fn in mf2keys:
855 857 if mf1.has_key(fn):
856 858 if mf1.flags(fn) != mf2.flags(fn) or \
857 859 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
858 860 modified.append(fn)
859 861 elif list_clean:
860 862 clean.append(fn)
861 863 del mf1[fn]
862 864 else:
863 865 added.append(fn)
864 866
865 867 removed = mf1.keys()
866 868
867 869 # sort and return results:
868 870 for l in modified, added, removed, deleted, unknown, ignored, clean:
869 871 l.sort()
870 872 return (modified, added, removed, deleted, unknown, ignored, clean)
871 873
872 874 def add(self, list, wlock=None):
873 875 if not wlock:
874 876 wlock = self.wlock()
875 877 for f in list:
876 878 p = self.wjoin(f)
877 879 if not os.path.exists(p):
878 880 self.ui.warn(_("%s does not exist!\n") % f)
879 881 elif not os.path.isfile(p):
880 882 self.ui.warn(_("%s not added: only files supported currently\n")
881 883 % f)
882 884 elif self.dirstate.state(f) in 'an':
883 885 self.ui.warn(_("%s already tracked!\n") % f)
884 886 else:
885 887 self.dirstate.update([f], "a")
886 888
887 889 def forget(self, list, wlock=None):
888 890 if not wlock:
889 891 wlock = self.wlock()
890 892 for f in list:
891 893 if self.dirstate.state(f) not in 'ai':
892 894 self.ui.warn(_("%s not added!\n") % f)
893 895 else:
894 896 self.dirstate.forget([f])
895 897
896 898 def remove(self, list, unlink=False, wlock=None):
897 899 if unlink:
898 900 for f in list:
899 901 try:
900 902 util.unlink(self.wjoin(f))
901 903 except OSError, inst:
902 904 if inst.errno != errno.ENOENT:
903 905 raise
904 906 if not wlock:
905 907 wlock = self.wlock()
906 908 for f in list:
907 909 p = self.wjoin(f)
908 910 if os.path.exists(p):
909 911 self.ui.warn(_("%s still exists!\n") % f)
910 912 elif self.dirstate.state(f) == 'a':
911 913 self.dirstate.forget([f])
912 914 elif f not in self.dirstate:
913 915 self.ui.warn(_("%s not tracked!\n") % f)
914 916 else:
915 917 self.dirstate.update([f], "r")
916 918
917 919 def undelete(self, list, wlock=None):
918 920 p = self.dirstate.parents()[0]
919 921 mn = self.changelog.read(p)[0]
920 922 m = self.manifest.read(mn)
921 923 if not wlock:
922 924 wlock = self.wlock()
923 925 for f in list:
924 926 if self.dirstate.state(f) not in "r":
925 927 self.ui.warn("%s not removed!\n" % f)
926 928 else:
927 929 t = self.file(f).read(m[f])
928 930 self.wwrite(f, t)
929 931 util.set_exec(self.wjoin(f), m.execf(f))
930 932 self.dirstate.update([f], "n")
931 933
932 934 def copy(self, source, dest, wlock=None):
933 935 p = self.wjoin(dest)
934 936 if not os.path.exists(p):
935 937 self.ui.warn(_("%s does not exist!\n") % dest)
936 938 elif not os.path.isfile(p):
937 939 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
938 940 else:
939 941 if not wlock:
940 942 wlock = self.wlock()
941 943 if self.dirstate.state(dest) == '?':
942 944 self.dirstate.update([dest], "a")
943 945 self.dirstate.copy(source, dest)
944 946
945 947 def heads(self, start=None):
946 948 heads = self.changelog.heads(start)
947 949 # sort the output in rev descending order
948 950 heads = [(-self.changelog.rev(h), h) for h in heads]
949 951 heads.sort()
950 952 return [n for (r, n) in heads]
951 953
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return {head: [branch tags]} for the given heads (see the
        long comment above for the exact semantics)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a traversal queued at a merge's second parent,
                # carrying the tags already found on the way down
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tagged nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1057 1059
1058 1060 def branches(self, nodes):
1059 1061 if not nodes:
1060 1062 nodes = [self.changelog.tip()]
1061 1063 b = []
1062 1064 for n in nodes:
1063 1065 t = n
1064 1066 while 1:
1065 1067 p = self.changelog.parents(n)
1066 1068 if p[1] != nullid or p[0] == nullid:
1067 1069 b.append((t, n, p[0], p[1]))
1068 1070 break
1069 1071 n = p[0]
1070 1072 return b
1071 1073
1072 1074 def between(self, pairs):
1073 1075 r = []
1074 1076
1075 1077 for top, bottom in pairs:
1076 1078 n, l, i = top, [], 0
1077 1079 f = 1
1078 1080
1079 1081 while n != bottom:
1080 1082 p = self.changelog.parents(n)[0]
1081 1083 if i == f:
1082 1084 l.append(n)
1083 1085 f = f * 2
1084 1086 n = p
1085 1087 i += 1
1086 1088
1087 1089 r.append(l)
1088 1090
1089 1091 return r
1090 1092
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # nodemap gives O(1) "do we have this node locally" tests
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # local repo is empty: everything the remote has is incoming
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue both parents for the next request round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # batch the branches() queries 10 nodes at a time to
                # keep each wire request bounded
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: p is the first unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1231 1233
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        # without a caller-supplied base, run discovery ourselves to
        # populate it with the common boundary
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start with every local node, then prune away what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1279 1281
1280 1282 def pull(self, remote, heads=None, force=False, lock=None):
1281 1283 mylock = False
1282 1284 if not lock:
1283 1285 lock = self.lock()
1284 1286 mylock = True
1285 1287
1286 1288 try:
1287 1289 fetch = self.findincoming(remote, force=force)
1288 1290 if fetch == [nullid]:
1289 1291 self.ui.status(_("requesting all changes\n"))
1290 1292
1291 1293 if not fetch:
1292 1294 self.ui.status(_("no changes found\n"))
1293 1295 return 0
1294 1296
1295 1297 if heads is None:
1296 1298 cg = remote.changegroup(fetch, 'pull')
1297 1299 else:
1298 1300 if 'changegroupsubset' not in remote.capabilities:
1299 1301 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1300 1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1301 1303 return self.addchangegroup(cg, 'pull', remote.url())
1302 1304 finally:
1303 1305 if mylock:
1304 1306 lock.release()
1305 1307
1306 1308 def push(self, remote, force=False, revs=None):
1307 1309 # there are two ways to push to remote repo:
1308 1310 #
1309 1311 # addchangegroup assumes local user can lock remote
1310 1312 # repo (local filesystem, old ssh servers).
1311 1313 #
1312 1314 # unbundle assumes local user cannot lock remote repo (new ssh
1313 1315 # servers, http servers).
1314 1316
1315 1317 if remote.capable('unbundle'):
1316 1318 return self.push_unbundle(remote, force, revs)
1317 1319 return self.push_addchangegroup(remote, force, revs)
1318 1320
    def prepush(self, remote, force, revs):
        """Compute the changegroup for a push and vet the remote heads.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # empty remote: any push is fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # known remote head with no outgoing
                            # descendant stays a head after push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1374 1376
1375 1377 def push_addchangegroup(self, remote, force, revs):
1376 1378 lock = remote.lock()
1377 1379
1378 1380 ret = self.prepush(remote, force, revs)
1379 1381 if ret[0] is not None:
1380 1382 cg, remote_heads = ret
1381 1383 return remote.addchangegroup(cg, 'push', self.url())
1382 1384 return ret[1]
1383 1385
1384 1386 def push_unbundle(self, remote, force, revs):
1385 1387 # local repo finds heads on server, finds out what revs it
1386 1388 # must push. once revs transferred, if server finds it has
1387 1389 # different heads (someone else won commit/push race), server
1388 1390 # aborts.
1389 1391
1390 1392 ret = self.prepush(remote, force, revs)
1391 1393 if ret[0] is not None:
1392 1394 cg, remote_heads = ret
1393 1395 if force: remote_heads = ['force']
1394 1396 return remote.unbundle(cg, remote_heads, 'push')
1395 1397 return ret[1]
1396 1398
1397 1399 def changegroupinfo(self, nodes):
1398 1400 self.ui.note(_("%d changesets found\n") % len(nodes))
1399 1401 if self.ui.debugflag:
1400 1402 self.ui.debug(_("List of changesets:\n"))
1401 1403 for node in nodes:
1402 1404 self.ui.debug("%s\n" % hex(node))
1403 1405
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping a lazy generator; the
        'preoutgoing' hook fires now, the 'outgoing' hook fires as the
        group is consumed."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1675 1677
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping a lazy generator."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revision numbers of the outgoing changesets, used to select
        # the manifest/file revisions linked to them
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset 'owns' itself, so its lookup function is identity
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing,
        # in revision order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to its owning changeset node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1742 1744
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # map a changeset node to the linkrev it will get when added
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last revision number before/after the add
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk marks the end of the file list
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may throw to veto before tr.close()
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1849 1851
1850 1852
1851 1853 def stream_in(self, remote):
1852 1854 fp = remote.stream_out()
1853 1855 l = fp.readline()
1854 1856 try:
1855 1857 resp = int(l)
1856 1858 except ValueError:
1857 1859 raise util.UnexpectedOutput(
1858 1860 _('Unexpected response from remote server:'), l)
1859 1861 if resp == 1:
1860 1862 raise util.Abort(_('operation forbidden by server'))
1861 1863 elif resp == 2:
1862 1864 raise util.Abort(_('locking the remote repository failed'))
1863 1865 elif resp != 0:
1864 1866 raise util.Abort(_('the server sent an unknown error code'))
1865 1867 self.ui.status(_('streaming all changes\n'))
1866 1868 l = fp.readline()
1867 1869 try:
1868 1870 total_files, total_bytes = map(int, l.split(' ', 1))
1869 1871 except ValueError, TypeError:
1870 1872 raise util.UnexpectedOutput(
1871 1873 _('Unexpected response from remote server:'), l)
1872 1874 self.ui.status(_('%d files to transfer, %s of data\n') %
1873 1875 (total_files, util.bytecount(total_bytes)))
1874 1876 start = time.time()
1875 1877 for i in xrange(total_files):
1876 1878 # XXX doesn't support '\n' or '\r' in filenames
1877 1879 l = fp.readline()
1878 1880 try:
1879 1881 name, size = l.split('\0', 1)
1880 1882 size = int(size)
1881 1883 except ValueError, TypeError:
1882 1884 raise util.UnexpectedOutput(
1883 1885 _('Unexpected response from remote server:'), l)
1884 1886 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1885 1887 ofp = self.sopener(name, 'w')
1886 1888 for chunk in util.filechunkiter(fp, limit=size):
1887 1889 ofp.write(chunk)
1888 1890 ofp.close()
1889 1891 elapsed = time.time() - start
1890 1892 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1891 1893 (util.bytecount(total_bytes), elapsed,
1892 1894 util.bytecount(total_bytes / elapsed)))
1893 1895 self.reload()
1894 1896 return len(self.heads()) + 1
1895 1897
1896 1898 def clone(self, remote, heads=[], stream=False):
1897 1899 '''clone remote repository.
1898 1900
1899 1901 keyword arguments:
1900 1902 heads: list of revs to clone (forces use of pull)
1901 1903 stream: use streaming clone if possible'''
1902 1904
1903 1905 # now, all clients that can request uncompressed clones can
1904 1906 # read repo formats supported by all servers that can serve
1905 1907 # them.
1906 1908
1907 1909 # if revlog format changes, client will have to check version
1908 1910 # and format flags on "stream" capability, and use
1909 1911 # uncompressed only if compatible.
1910 1912
1911 1913 if stream and not heads and remote.capable('stream'):
1912 1914 return self.stream_in(remote)
1913 1915 return self.pull(remote, heads)
1914 1916
1915 1917 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted into plain tuples now; the returned
    closure applies them with util.rename when invoked later.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for source, destination in pending:
            util.rename(source, destination)
    return a
1922 1924
def instance(ui, path, create):
    """Open (or create, if create is true) the local repository at path."""
    # strip a leading 'file:' scheme before handing the path to the repo
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1925 1927
def islocal(path):
    """Repositories of this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now