don't use readline() to read branches.cache...
Alexis S. L. Carvalho
r3668:6f669696 default
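
The change is in _readbranchcache: instead of consuming the header line with readline() and then iterating over the same open file object for the branch entries, the cache is now read in a single read() call, the file is closed immediately, and parsing happens on the in-memory list of lines. The new "if not l: continue" guard skips the empty string that read().split('\n') leaves after the final newline. Below is a minimal standalone sketch of the new parsing logic, not the Mercurial code itself; the file layout is taken from _writebranchcache in this same file, and plain hex strings stand in for the real bin()/nullid handling:

def read_branch_cache(path="branches.cache"):
    # Cache layout, per _writebranchcache:
    #   "<hex tip node> <tip rev>\n"   followed by
    #   "<hex node> <branch label>\n"  for each branch.
    partial = {}
    try:
        f = open(path)
        lines = f.read().split('\n')  # slurp the whole cache in one read()
        f.close()                     # file is closed before any parsing
        # first line: tip node and tip revision
        last, lrev = lines.pop(0).rstrip().split(" ", 1)
        lrev = int(lrev)
        for l in lines:
            if not l:                 # split('\n') leaves a trailing ''
                continue
            node, label = l.rstrip().split(" ", 1)
            partial[label] = node
    except IOError:
        last, lrev = None, -1         # stand-ins for nullid/nullrev
    return partial, last, lrev

Reading the file wholesale also avoids interleaving readline() with "for l in f:" iteration, which in Python 2 goes through the file object's internal read-ahead buffer. Note that the real method additionally sanity-checks that lrev still names last in the changelog before trusting the cached entries.
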
@@ -1,1866 +1,1868 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.realpath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.sopener = util.opener(self.path)
51 51 self.wopener = util.opener(self.root)
52 52
53 53 try:
54 54 self.ui.readconfig(self.join("hgrc"), self.root)
55 55 except IOError:
56 56 pass
57 57
58 58 v = self.ui.configrevlog()
59 59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 61 fl = v.get('flags', None)
62 62 flags = 0
63 63 if fl != None:
64 64 for x in fl.split():
65 65 flags |= revlog.flagstr(x)
66 66 elif self.revlogv1:
67 67 flags = revlog.REVLOG_DEFAULT_FLAGS
68 68
69 69 v = self.revlogversion | flags
70 70 self.manifest = manifest.manifest(self.sopener, v)
71 71 self.changelog = changelog.changelog(self.sopener, v)
72 72
73 73 # the changelog might not have the inline index flag
74 74 # on. If the format of the changelog is the same as found in
75 75 # .hgrc, apply any flags found in the .hgrc as well.
76 76 # Otherwise, just version from the changelog
77 77 v = self.changelog.version
78 78 if v == self.revlogversion:
79 79 v |= flags
80 80 self.revlogversion = v
81 81
82 82 self.tagscache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.encodepats = None
86 86 self.decodepats = None
87 87 self.transhandle = None
88 88
89 89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90 90
91 91 def url(self):
92 92 return 'file:' + self.root
93 93
94 94 def hook(self, name, throw=False, **args):
95 95 def callhook(hname, funcname):
96 96 '''call python hook. hook is callable object, looked up as
97 97 name in python module. if callable returns "true", hook
98 98 fails, else passes. if hook raises exception, treated as
99 99 hook failure. exception propagates if throw is "true".
100 100
101 101 reason for "true" meaning "hook failed" is so that
102 102 unmodified commands (e.g. mercurial.commands.update) can
103 103 be run as hooks without wrappers to convert return values.'''
104 104
105 105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
106 106 d = funcname.rfind('.')
107 107 if d == -1:
108 108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
109 109 % (hname, funcname))
110 110 modname = funcname[:d]
111 111 try:
112 112 obj = __import__(modname)
113 113 except ImportError:
114 114 try:
115 115 # extensions are loaded with hgext_ prefix
116 116 obj = __import__("hgext_%s" % modname)
117 117 except ImportError:
118 118 raise util.Abort(_('%s hook is invalid '
119 119 '(import of "%s" failed)') %
120 120 (hname, modname))
121 121 try:
122 122 for p in funcname.split('.')[1:]:
123 123 obj = getattr(obj, p)
124 124 except AttributeError, err:
125 125 raise util.Abort(_('%s hook is invalid '
126 126 '("%s" is not defined)') %
127 127 (hname, funcname))
128 128 if not callable(obj):
129 129 raise util.Abort(_('%s hook is invalid '
130 130 '("%s" is not callable)') %
131 131 (hname, funcname))
132 132 try:
133 133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
134 134 except (KeyboardInterrupt, util.SignalInterrupt):
135 135 raise
136 136 except Exception, exc:
137 137 if isinstance(exc, util.Abort):
138 138 self.ui.warn(_('error: %s hook failed: %s\n') %
139 139 (hname, exc.args[0]))
140 140 else:
141 141 self.ui.warn(_('error: %s hook raised an exception: '
142 142 '%s\n') % (hname, exc))
143 143 if throw:
144 144 raise
145 145 self.ui.print_exc()
146 146 return True
147 147 if r:
148 148 if throw:
149 149 raise util.Abort(_('%s hook failed') % hname)
150 150 self.ui.warn(_('warning: %s hook failed\n') % hname)
151 151 return r
152 152
153 153 def runhook(name, cmd):
154 154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
155 155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
156 156 r = util.system(cmd, environ=env, cwd=self.root)
157 157 if r:
158 158 desc, r = util.explain_exit(r)
159 159 if throw:
160 160 raise util.Abort(_('%s hook %s') % (name, desc))
161 161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
162 162 return r
163 163
164 164 r = False
165 165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
166 166 if hname.split(".", 1)[0] == name and cmd]
167 167 hooks.sort()
168 168 for hname, cmd in hooks:
169 169 if cmd.startswith('python:'):
170 170 r = callhook(hname, cmd[7:].strip()) or r
171 171 else:
172 172 r = runhook(hname, cmd) or r
173 173 return r
174 174
175 175 tag_disallowed = ':\r\n'
176 176
177 177 def tag(self, name, node, message, local, user, date):
178 178 '''tag a revision with a symbolic name.
179 179
180 180 if local is True, the tag is stored in a per-repository file.
181 181 otherwise, it is stored in the .hgtags file, and a new
182 182 changeset is committed with the change.
183 183
184 184 keyword arguments:
185 185
186 186 local: whether to store tag in non-version-controlled file
187 187 (default False)
188 188
189 189 message: commit message to use if committing
190 190
191 191 user: name of user to use if committing
192 192
193 193 date: date tuple to use if committing'''
194 194
195 195 for c in self.tag_disallowed:
196 196 if c in name:
197 197 raise util.Abort(_('%r cannot be used in a tag name') % c)
198 198
199 199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
200 200
201 201 if local:
202 202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 203 self.hook('tag', node=hex(node), tag=name, local=local)
204 204 return
205 205
206 206 for x in self.status()[:5]:
207 207 if '.hgtags' in x:
208 208 raise util.Abort(_('working copy of .hgtags is changed '
209 209 '(please commit .hgtags manually)'))
210 210
211 211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
212 212 if self.dirstate.state('.hgtags') == '?':
213 213 self.add(['.hgtags'])
214 214
215 215 self.commit(['.hgtags'], message, user, date)
216 216 self.hook('tag', node=hex(node), tag=name, local=local)
217 217
218 218 def tags(self):
219 219 '''return a mapping of tag to node'''
220 220 if not self.tagscache:
221 221 self.tagscache = {}
222 222
223 223 def parsetag(line, context):
224 224 if not line:
225 225 return
226 226 s = line.split(" ", 1)
227 227 if len(s) != 2:
228 228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 229 return
230 230 node, key = s
231 231 key = key.strip()
232 232 try:
233 233 bin_n = bin(node)
234 234 except TypeError:
235 235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 236 (context, node))
237 237 return
238 238 if bin_n not in self.changelog.nodemap:
239 239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 240 (context, key))
241 241 return
242 242 self.tagscache[key] = bin_n
243 243
244 244 # read the tags file from each head, ending with the tip,
245 245 # and add each tag found to the map, with "newer" ones
246 246 # taking precedence
247 247 f = None
248 248 for rev, node, fnode in self._hgtagsnodes():
249 249 f = (f and f.filectx(fnode) or
250 250 self.filectx('.hgtags', fileid=fnode))
251 251 count = 0
252 252 for l in f.data().splitlines():
253 253 count += 1
254 254 parsetag(l, _("%s, line %d") % (str(f), count))
255 255
256 256 try:
257 257 f = self.opener("localtags")
258 258 count = 0
259 259 for l in f:
260 260 count += 1
261 261 parsetag(l, _("localtags, line %d") % count)
262 262 except IOError:
263 263 pass
264 264
265 265 self.tagscache['tip'] = self.changelog.tip()
266 266
267 267 return self.tagscache
268 268
269 269 def _hgtagsnodes(self):
270 270 heads = self.heads()
271 271 heads.reverse()
272 272 last = {}
273 273 ret = []
274 274 for node in heads:
275 275 c = self.changectx(node)
276 276 rev = c.rev()
277 277 try:
278 278 fnode = c.filenode('.hgtags')
279 279 except repo.LookupError:
280 280 continue
281 281 ret.append((rev, node, fnode))
282 282 if fnode in last:
283 283 ret[last[fnode]] = None
284 284 last[fnode] = len(ret) - 1
285 285 return [item for item in ret if item]
286 286
287 287 def tagslist(self):
288 288 '''return a list of tags ordered by revision'''
289 289 l = []
290 290 for t, n in self.tags().items():
291 291 try:
292 292 r = self.changelog.rev(n)
293 293 except:
294 294 r = -2 # sort to the beginning of the list if unknown
295 295 l.append((r, t, n))
296 296 l.sort()
297 297 return [(t, n) for r, t, n in l]
298 298
299 299 def nodetags(self, node):
300 300 '''return the tags associated with a node'''
301 301 if not self.nodetagscache:
302 302 self.nodetagscache = {}
303 303 for t, n in self.tags().items():
304 304 self.nodetagscache.setdefault(n, []).append(t)
305 305 return self.nodetagscache.get(node, [])
306 306
307 307 def branchtags(self):
308 308 if self.branchcache != None:
309 309 return self.branchcache
310 310
311 311 self.branchcache = {} # avoid recursion in changectx
312 312
313 313 partial, last, lrev = self._readbranchcache()
314 314
315 315 tiprev = self.changelog.count() - 1
316 316 if lrev != tiprev:
317 317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 319
320 320 self.branchcache = partial
321 321 return self.branchcache
322 322
323 323 def _readbranchcache(self):
324 324 partial = {}
325 325 try:
326 326 f = self.opener("branches.cache")
327 last, lrev = f.readline().rstrip().split(" ", 1)
327 lines = f.read().split('\n')
328 f.close()
329 last, lrev = lines.pop(0).rstrip().split(" ", 1)
328 330 last, lrev = bin(last), int(lrev)
329 331 if (lrev < self.changelog.count() and
330 332 self.changelog.node(lrev) == last): # sanity check
331 for l in f:
333 for l in lines:
334 if not l: continue
332 335 node, label = l.rstrip().split(" ", 1)
333 336 partial[label] = bin(node)
334 337 else: # invalidate the cache
335 338 last, lrev = nullid, nullrev
336 f.close()
337 339 except IOError:
338 340 last, lrev = nullid, nullrev
339 341 return partial, last, lrev
340 342
341 343 def _writebranchcache(self, branches, tip, tiprev):
342 344 try:
343 345 f = self.opener("branches.cache", "w")
344 346 f.write("%s %s\n" % (hex(tip), tiprev))
345 347 for label, node in branches.iteritems():
346 348 f.write("%s %s\n" % (hex(node), label))
347 349 except IOError:
348 350 pass
349 351
350 352 def _updatebranchcache(self, partial, start, end):
351 353 for r in xrange(start, end):
352 354 c = self.changectx(r)
353 355 b = c.branch()
354 356 if b:
355 357 partial[b] = c.node()
356 358
357 359 def lookup(self, key):
358 360 if key == '.':
359 361 key = self.dirstate.parents()[0]
360 362 if key == nullid:
361 363 raise repo.RepoError(_("no revision checked out"))
362 364 n = self.changelog._match(key)
363 365 if n:
364 366 return n
365 367 if key in self.tags():
366 368 return self.tags()[key]
367 369 if key in self.branchtags():
368 370 return self.branchtags()[key]
369 371 n = self.changelog._partialmatch(key)
370 372 if n:
371 373 return n
372 374 raise repo.RepoError(_("unknown revision '%s'") % key)
373 375
374 376 def dev(self):
375 377 return os.lstat(self.path).st_dev
376 378
377 379 def local(self):
378 380 return True
379 381
380 382 def join(self, f):
381 383 return os.path.join(self.path, f)
382 384
383 385 def sjoin(self, f):
384 386 return os.path.join(self.path, f)
385 387
386 388 def wjoin(self, f):
387 389 return os.path.join(self.root, f)
388 390
389 391 def file(self, f):
390 392 if f[0] == '/':
391 393 f = f[1:]
392 394 return filelog.filelog(self.sopener, f, self.revlogversion)
393 395
394 396 def changectx(self, changeid=None):
395 397 return context.changectx(self, changeid)
396 398
397 399 def workingctx(self):
398 400 return context.workingctx(self)
399 401
400 402 def parents(self, changeid=None):
401 403 '''
402 404 get list of changectxs for parents of changeid or working directory
403 405 '''
404 406 if changeid is None:
405 407 pl = self.dirstate.parents()
406 408 else:
407 409 n = self.changelog.lookup(changeid)
408 410 pl = self.changelog.parents(n)
409 411 if pl[1] == nullid:
410 412 return [self.changectx(pl[0])]
411 413 return [self.changectx(pl[0]), self.changectx(pl[1])]
412 414
413 415 def filectx(self, path, changeid=None, fileid=None):
414 416 """changeid can be a changeset revision, node, or tag.
415 417 fileid can be a file revision or node."""
416 418 return context.filectx(self, path, changeid, fileid)
417 419
418 420 def getcwd(self):
419 421 return self.dirstate.getcwd()
420 422
421 423 def wfile(self, f, mode='r'):
422 424 return self.wopener(f, mode)
423 425
424 426 def wread(self, filename):
425 427 if self.encodepats == None:
426 428 l = []
427 429 for pat, cmd in self.ui.configitems("encode"):
428 430 mf = util.matcher(self.root, "", [pat], [], [])[1]
429 431 l.append((mf, cmd))
430 432 self.encodepats = l
431 433
432 434 data = self.wopener(filename, 'r').read()
433 435
434 436 for mf, cmd in self.encodepats:
435 437 if mf(filename):
436 438 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
437 439 data = util.filter(data, cmd)
438 440 break
439 441
440 442 return data
441 443
442 444 def wwrite(self, filename, data, fd=None):
443 445 if self.decodepats == None:
444 446 l = []
445 447 for pat, cmd in self.ui.configitems("decode"):
446 448 mf = util.matcher(self.root, "", [pat], [], [])[1]
447 449 l.append((mf, cmd))
448 450 self.decodepats = l
449 451
450 452 for mf, cmd in self.decodepats:
451 453 if mf(filename):
452 454 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
453 455 data = util.filter(data, cmd)
454 456 break
455 457
456 458 if fd:
457 459 return fd.write(data)
458 460 return self.wopener(filename, 'w').write(data)
459 461
460 462 def transaction(self):
461 463 tr = self.transhandle
462 464 if tr != None and tr.running():
463 465 return tr.nest()
464 466
465 467 # save dirstate for rollback
466 468 try:
467 469 ds = self.opener("dirstate").read()
468 470 except IOError:
469 471 ds = ""
470 472 self.opener("journal.dirstate", "w").write(ds)
471 473
472 474 tr = transaction.transaction(self.ui.warn, self.sopener,
473 475 self.sjoin("journal"),
474 476 aftertrans(self.path))
475 477 self.transhandle = tr
476 478 return tr
477 479
478 480 def recover(self):
479 481 l = self.lock()
480 482 if os.path.exists(self.sjoin("journal")):
481 483 self.ui.status(_("rolling back interrupted transaction\n"))
482 484 transaction.rollback(self.sopener, self.sjoin("journal"))
483 485 self.reload()
484 486 return True
485 487 else:
486 488 self.ui.warn(_("no interrupted transaction available\n"))
487 489 return False
488 490
489 491 def rollback(self, wlock=None):
490 492 if not wlock:
491 493 wlock = self.wlock()
492 494 l = self.lock()
493 495 if os.path.exists(self.sjoin("undo")):
494 496 self.ui.status(_("rolling back last transaction\n"))
495 497 transaction.rollback(self.sopener, self.sjoin("undo"))
496 498 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
497 499 self.reload()
498 500 self.wreload()
499 501 else:
500 502 self.ui.warn(_("no rollback information available\n"))
501 503
502 504 def wreload(self):
503 505 self.dirstate.read()
504 506
505 507 def reload(self):
506 508 self.changelog.load()
507 509 self.manifest.load()
508 510 self.tagscache = None
509 511 self.nodetagscache = None
510 512
511 513 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
512 514 desc=None):
513 515 try:
514 516 l = lock.lock(lockname, 0, releasefn, desc=desc)
515 517 except lock.LockHeld, inst:
516 518 if not wait:
517 519 raise
518 520 self.ui.warn(_("waiting for lock on %s held by %s\n") %
519 521 (desc, inst.args[0]))
520 522 # default to 600 seconds timeout
521 523 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
522 524 releasefn, desc=desc)
523 525 if acquirefn:
524 526 acquirefn()
525 527 return l
526 528
527 529 def lock(self, wait=1):
528 530 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
529 531 desc=_('repository %s') % self.origroot)
530 532
531 533 def wlock(self, wait=1):
532 534 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
533 535 self.wreload,
534 536 desc=_('working directory of %s') % self.origroot)
535 537
536 538 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
537 539 """
538 540 commit an individual file as part of a larger transaction
539 541 """
540 542
541 543 t = self.wread(fn)
542 544 fl = self.file(fn)
543 545 fp1 = manifest1.get(fn, nullid)
544 546 fp2 = manifest2.get(fn, nullid)
545 547
546 548 meta = {}
547 549 cp = self.dirstate.copied(fn)
548 550 if cp:
549 551 meta["copy"] = cp
550 552 if not manifest2: # not a branch merge
551 553 meta["copyrev"] = hex(manifest1.get(cp, nullid))
552 554 fp2 = nullid
553 555 elif fp2 != nullid: # copied on remote side
554 556 meta["copyrev"] = hex(manifest1.get(cp, nullid))
555 557 else: # copied on local side, reversed
556 558 meta["copyrev"] = hex(manifest2.get(cp))
557 559 fp2 = nullid
558 560 self.ui.debug(_(" %s: copy %s:%s\n") %
559 561 (fn, cp, meta["copyrev"]))
560 562 fp1 = nullid
561 563 elif fp2 != nullid:
562 564 # is one parent an ancestor of the other?
563 565 fpa = fl.ancestor(fp1, fp2)
564 566 if fpa == fp1:
565 567 fp1, fp2 = fp2, nullid
566 568 elif fpa == fp2:
567 569 fp2 = nullid
568 570
569 571 # is the file unmodified from the parent? report existing entry
570 572 if fp2 == nullid and not fl.cmp(fp1, t):
571 573 return fp1
572 574
573 575 changelist.append(fn)
574 576 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
575 577
576 578 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
577 579 if p1 is None:
578 580 p1, p2 = self.dirstate.parents()
579 581 return self.commit(files=files, text=text, user=user, date=date,
580 582 p1=p1, p2=p2, wlock=wlock)
581 583
582 584 def commit(self, files=None, text="", user=None, date=None,
583 585 match=util.always, force=False, lock=None, wlock=None,
584 586 force_editor=False, p1=None, p2=None, extra={}):
585 587
586 588 commit = []
587 589 remove = []
588 590 changed = []
589 591 use_dirstate = (p1 is None) # not rawcommit
590 592 extra = extra.copy()
591 593
592 594 if use_dirstate:
593 595 if files:
594 596 for f in files:
595 597 s = self.dirstate.state(f)
596 598 if s in 'nmai':
597 599 commit.append(f)
598 600 elif s == 'r':
599 601 remove.append(f)
600 602 else:
601 603 self.ui.warn(_("%s not tracked!\n") % f)
602 604 else:
603 605 changes = self.status(match=match)[:5]
604 606 modified, added, removed, deleted, unknown = changes
605 607 commit = modified + added
606 608 remove = removed
607 609 else:
608 610 commit = files
609 611
610 612 if use_dirstate:
611 613 p1, p2 = self.dirstate.parents()
612 614 update_dirstate = True
613 615 else:
614 616 p1, p2 = p1, p2 or nullid
615 617 update_dirstate = (self.dirstate.parents()[0] == p1)
616 618
617 619 c1 = self.changelog.read(p1)
618 620 c2 = self.changelog.read(p2)
619 621 m1 = self.manifest.read(c1[0]).copy()
620 622 m2 = self.manifest.read(c2[0])
621 623
622 624 if use_dirstate:
623 625 branchname = self.workingctx().branch()
624 626 else:
625 627 branchname = ""
626 628
627 629 if use_dirstate:
628 630 oldname = c1[5].get("branch", "")
629 631 if not commit and not remove and not force and p2 == nullid and \
630 632 branchname == oldname:
631 633 self.ui.status(_("nothing changed\n"))
632 634 return None
633 635
634 636 xp1 = hex(p1)
635 637 if p2 == nullid: xp2 = ''
636 638 else: xp2 = hex(p2)
637 639
638 640 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
639 641
640 642 if not wlock:
641 643 wlock = self.wlock()
642 644 if not lock:
643 645 lock = self.lock()
644 646 tr = self.transaction()
645 647
646 648 # check in files
647 649 new = []
648 650 linkrev = self.changelog.count()
649 651 commit.sort()
650 652 for f in commit:
651 653 self.ui.note(f + "\n")
652 654 try:
653 655 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
654 656 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
655 657 new.append(f)
656 658 except IOError:
657 659 if use_dirstate:
658 660 self.ui.warn(_("trouble committing %s!\n") % f)
659 661 raise
660 662 else:
661 663 remove.append(f)
662 664
663 665 # update manifest
664 666 remove.sort()
665 667
666 668 for f in remove:
667 669 if f in m1:
668 670 del m1[f]
669 671 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
670 672
671 673 # add changeset
672 674 user = user or self.ui.username()
673 675 if not text or force_editor:
674 676 edittext = []
675 677 if text:
676 678 edittext.append(text)
677 679 edittext.append("")
678 680 if p2 != nullid:
679 681 edittext.append("HG: branch merge")
680 682 edittext.extend(["HG: changed %s" % f for f in changed])
681 683 edittext.extend(["HG: removed %s" % f for f in remove])
682 684 if not changed and not remove:
683 685 edittext.append("HG: no files changed")
684 686 edittext.append("")
685 687 # run editor in the repository root
686 688 olddir = os.getcwd()
687 689 os.chdir(self.root)
688 690 text = self.ui.edit("\n".join(edittext), user)
689 691 os.chdir(olddir)
690 692
691 693 lines = [line.rstrip() for line in text.rstrip().splitlines()]
692 694 while lines and not lines[0]:
693 695 del lines[0]
694 696 if not lines:
695 697 return None
696 698 text = '\n'.join(lines)
697 699 if branchname:
698 700 extra["branch"] = branchname
699 701 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
700 702 user, date, extra)
701 703 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
702 704 parent2=xp2)
703 705 tr.close()
704 706
705 707 if use_dirstate or update_dirstate:
706 708 self.dirstate.setparents(n)
707 709 if use_dirstate:
708 710 self.dirstate.update(new, "n")
709 711 self.dirstate.forget(remove)
710 712
711 713 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
712 714 return n
713 715
714 716 def walk(self, node=None, files=[], match=util.always, badmatch=None):
715 717 '''
716 718 walk recursively through the directory tree or a given
717 719 changeset, finding all files matched by the match
718 720 function
719 721
720 722 results are yielded in a tuple (src, filename), where src
721 723 is one of:
722 724 'f' the file was found in the directory tree
723 725 'm' the file was only in the dirstate and not in the tree
724 726 'b' file was not found and matched badmatch
725 727 '''
726 728
727 729 if node:
728 730 fdict = dict.fromkeys(files)
729 731 for fn in self.manifest.read(self.changelog.read(node)[0]):
730 732 for ffn in fdict:
731 733 # match if the file is the exact name or a directory
732 734 if ffn == fn or fn.startswith("%s/" % ffn):
733 735 del fdict[ffn]
734 736 break
735 737 if match(fn):
736 738 yield 'm', fn
737 739 for fn in fdict:
738 740 if badmatch and badmatch(fn):
739 741 if match(fn):
740 742 yield 'b', fn
741 743 else:
742 744 self.ui.warn(_('%s: No such file in rev %s\n') % (
743 745 util.pathto(self.getcwd(), fn), short(node)))
744 746 else:
745 747 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
746 748 yield src, fn
747 749
748 750 def status(self, node1=None, node2=None, files=[], match=util.always,
749 751 wlock=None, list_ignored=False, list_clean=False):
750 752 """return status of files between two nodes or node and working directory
751 753
752 754 If node1 is None, use the first dirstate parent instead.
753 755 If node2 is None, compare node1 with working directory.
754 756 """
755 757
756 758 def fcmp(fn, mf):
757 759 t1 = self.wread(fn)
758 760 return self.file(fn).cmp(mf.get(fn, nullid), t1)
759 761
760 762 def mfmatches(node):
761 763 change = self.changelog.read(node)
762 764 mf = self.manifest.read(change[0]).copy()
763 765 for fn in mf.keys():
764 766 if not match(fn):
765 767 del mf[fn]
766 768 return mf
767 769
768 770 modified, added, removed, deleted, unknown = [], [], [], [], []
769 771 ignored, clean = [], []
770 772
771 773 compareworking = False
772 774 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
773 775 compareworking = True
774 776
775 777 if not compareworking:
776 778 # read the manifest from node1 before the manifest from node2,
777 779 # so that we'll hit the manifest cache if we're going through
778 780 # all the revisions in parent->child order.
779 781 mf1 = mfmatches(node1)
780 782
781 783 # are we comparing the working directory?
782 784 if not node2:
783 785 if not wlock:
784 786 try:
785 787 wlock = self.wlock(wait=0)
786 788 except lock.LockException:
787 789 wlock = None
788 790 (lookup, modified, added, removed, deleted, unknown,
789 791 ignored, clean) = self.dirstate.status(files, match,
790 792 list_ignored, list_clean)
791 793
792 794 # are we comparing working dir against its parent?
793 795 if compareworking:
794 796 if lookup:
795 797 # do a full compare of any files that might have changed
796 798 mf2 = mfmatches(self.dirstate.parents()[0])
797 799 for f in lookup:
798 800 if fcmp(f, mf2):
799 801 modified.append(f)
800 802 else:
801 803 clean.append(f)
802 804 if wlock is not None:
803 805 self.dirstate.update([f], "n")
804 806 else:
805 807 # we are comparing working dir against non-parent
806 808 # generate a pseudo-manifest for the working dir
807 809 # XXX: create it in dirstate.py ?
808 810 mf2 = mfmatches(self.dirstate.parents()[0])
809 811 for f in lookup + modified + added:
810 812 mf2[f] = ""
811 813 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
812 814 for f in removed:
813 815 if f in mf2:
814 816 del mf2[f]
815 817 else:
816 818 # we are comparing two revisions
817 819 mf2 = mfmatches(node2)
818 820
819 821 if not compareworking:
820 822 # flush lists from dirstate before comparing manifests
821 823 modified, added, clean = [], [], []
822 824
823 825 # make sure to sort the files so we talk to the disk in a
824 826 # reasonable order
825 827 mf2keys = mf2.keys()
826 828 mf2keys.sort()
827 829 for fn in mf2keys:
828 830 if mf1.has_key(fn):
829 831 if mf1.flags(fn) != mf2.flags(fn) or \
830 832 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
831 833 modified.append(fn)
832 834 elif list_clean:
833 835 clean.append(fn)
834 836 del mf1[fn]
835 837 else:
836 838 added.append(fn)
837 839
838 840 removed = mf1.keys()
839 841
840 842 # sort and return results:
841 843 for l in modified, added, removed, deleted, unknown, ignored, clean:
842 844 l.sort()
843 845 return (modified, added, removed, deleted, unknown, ignored, clean)
844 846
845 847 def add(self, list, wlock=None):
846 848 if not wlock:
847 849 wlock = self.wlock()
848 850 for f in list:
849 851 p = self.wjoin(f)
850 852 if not os.path.exists(p):
851 853 self.ui.warn(_("%s does not exist!\n") % f)
852 854 elif not os.path.isfile(p):
853 855 self.ui.warn(_("%s not added: only files supported currently\n")
854 856 % f)
855 857 elif self.dirstate.state(f) in 'an':
856 858 self.ui.warn(_("%s already tracked!\n") % f)
857 859 else:
858 860 self.dirstate.update([f], "a")
859 861
860 862 def forget(self, list, wlock=None):
861 863 if not wlock:
862 864 wlock = self.wlock()
863 865 for f in list:
864 866 if self.dirstate.state(f) not in 'ai':
865 867 self.ui.warn(_("%s not added!\n") % f)
866 868 else:
867 869 self.dirstate.forget([f])
868 870
869 871 def remove(self, list, unlink=False, wlock=None):
870 872 if unlink:
871 873 for f in list:
872 874 try:
873 875 util.unlink(self.wjoin(f))
874 876 except OSError, inst:
875 877 if inst.errno != errno.ENOENT:
876 878 raise
877 879 if not wlock:
878 880 wlock = self.wlock()
879 881 for f in list:
880 882 p = self.wjoin(f)
881 883 if os.path.exists(p):
882 884 self.ui.warn(_("%s still exists!\n") % f)
883 885 elif self.dirstate.state(f) == 'a':
884 886 self.dirstate.forget([f])
885 887 elif f not in self.dirstate:
886 888 self.ui.warn(_("%s not tracked!\n") % f)
887 889 else:
888 890 self.dirstate.update([f], "r")
889 891
890 892 def undelete(self, list, wlock=None):
891 893 p = self.dirstate.parents()[0]
892 894 mn = self.changelog.read(p)[0]
893 895 m = self.manifest.read(mn)
894 896 if not wlock:
895 897 wlock = self.wlock()
896 898 for f in list:
897 899 if self.dirstate.state(f) not in "r":
898 900 self.ui.warn("%s not removed!\n" % f)
899 901 else:
900 902 t = self.file(f).read(m[f])
901 903 self.wwrite(f, t)
902 904 util.set_exec(self.wjoin(f), m.execf(f))
903 905 self.dirstate.update([f], "n")
904 906
905 907 def copy(self, source, dest, wlock=None):
906 908 p = self.wjoin(dest)
907 909 if not os.path.exists(p):
908 910 self.ui.warn(_("%s does not exist!\n") % dest)
909 911 elif not os.path.isfile(p):
910 912 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
911 913 else:
912 914 if not wlock:
913 915 wlock = self.wlock()
914 916 if self.dirstate.state(dest) == '?':
915 917 self.dirstate.update([dest], "a")
916 918 self.dirstate.copy(source, dest)
917 919
918 920 def heads(self, start=None):
919 921 heads = self.changelog.heads(start)
920 922 # sort the output in rev descending order
921 923 heads = [(-self.changelog.rev(h), h) for h in heads]
922 924 heads.sort()
923 925 return [n for (r, n) in heads]
924 926
925 927 # branchlookup returns a dict giving a list of branches for
926 928 # each head. A branch is defined as the tag of a node or
927 929 # the branch of the node's parents. If a node has multiple
928 930 # branch tags, tags are eliminated if they are visible from other
929 931 # branch tags.
930 932 #
931 933 # So, for this graph: a->b->c->d->e
932 934 # \ /
933 935 # aa -----/
934 936 # a has tag 2.6.12
935 937 # d has tag 2.6.13
936 938 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
937 939 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
938 940 # from the list.
939 941 #
940 942 # It is possible that more than one head will have the same branch tag.
941 943 # callers need to check the result for multiple heads under the same
942 944 # branch tag if that is a problem for them (ie checkout of a specific
943 945 # branch).
944 946 #
945 947 # passing in a specific branch will limit the depth of the search
946 948 # through the parents. It won't limit the branches returned in the
947 949 # result though.
948 950 def branchlookup(self, heads=None, branch=None):
949 951 if not heads:
950 952 heads = self.heads()
951 953 headt = [ h for h in heads ]
952 954 chlog = self.changelog
953 955 branches = {}
954 956 merges = []
955 957 seenmerge = {}
956 958
957 959 # traverse the tree once for each head, recording in the branches
958 960 # dict which tags are visible from this head. The branches
959 961 # dict also records which tags are visible from each tag
960 962 # while we traverse.
961 963 while headt or merges:
962 964 if merges:
963 965 n, found = merges.pop()
964 966 visit = [n]
965 967 else:
966 968 h = headt.pop()
967 969 visit = [h]
968 970 found = [h]
969 971 seen = {}
970 972 while visit:
971 973 n = visit.pop()
972 974 if n in seen:
973 975 continue
974 976 pp = chlog.parents(n)
975 977 tags = self.nodetags(n)
976 978 if tags:
977 979 for x in tags:
978 980 if x == 'tip':
979 981 continue
980 982 for f in found:
981 983 branches.setdefault(f, {})[n] = 1
982 984 branches.setdefault(n, {})[n] = 1
983 985 break
984 986 if n not in found:
985 987 found.append(n)
986 988 if branch in tags:
987 989 continue
988 990 seen[n] = 1
989 991 if pp[1] != nullid and n not in seenmerge:
990 992 merges.append((pp[1], [x for x in found]))
991 993 seenmerge[n] = 1
992 994 if pp[0] != nullid:
993 995 visit.append(pp[0])
994 996 # traverse the branches dict, eliminating branch tags from each
995 997 # head that are visible from another branch tag for that head.
996 998 out = {}
997 999 viscache = {}
998 1000 for h in heads:
999 1001 def visible(node):
1000 1002 if node in viscache:
1001 1003 return viscache[node]
1002 1004 ret = {}
1003 1005 visit = [node]
1004 1006 while visit:
1005 1007 x = visit.pop()
1006 1008 if x in viscache:
1007 1009 ret.update(viscache[x])
1008 1010 elif x not in ret:
1009 1011 ret[x] = 1
1010 1012 if x in branches:
1011 1013 visit[len(visit):] = branches[x].keys()
1012 1014 viscache[node] = ret
1013 1015 return ret
1014 1016 if h not in branches:
1015 1017 continue
1016 1018 # O(n^2), but somewhat limited. This only searches the
1017 1019 # tags visible from a specific head, not all the tags in the
1018 1020 # whole repo.
1019 1021 for b in branches[h]:
1020 1022 vis = False
1021 1023 for bb in branches[h].keys():
1022 1024 if b != bb:
1023 1025 if b in visible(bb):
1024 1026 vis = True
1025 1027 break
1026 1028 if not vis:
1027 1029 l = out.setdefault(h, [])
1028 1030 l[len(l):] = self.nodetags(b)
1029 1031 return out
1030 1032
1031 1033 def branches(self, nodes):
1032 1034 if not nodes:
1033 1035 nodes = [self.changelog.tip()]
1034 1036 b = []
1035 1037 for n in nodes:
1036 1038 t = n
1037 1039 while 1:
1038 1040 p = self.changelog.parents(n)
1039 1041 if p[1] != nullid or p[0] == nullid:
1040 1042 b.append((t, n, p[0], p[1]))
1041 1043 break
1042 1044 n = p[0]
1043 1045 return b
1044 1046
1045 1047 def between(self, pairs):
1046 1048 r = []
1047 1049
1048 1050 for top, bottom in pairs:
1049 1051 n, l, i = top, [], 0
1050 1052 f = 1
1051 1053
1052 1054 while n != bottom:
1053 1055 p = self.changelog.parents(n)[0]
1054 1056 if i == f:
1055 1057 l.append(n)
1056 1058 f = f * 2
1057 1059 n = p
1058 1060 i += 1
1059 1061
1060 1062 r.append(l)
1061 1063
1062 1064 return r
1063 1065
1064 1066 def findincoming(self, remote, base=None, heads=None, force=False):
1065 1067 """Return list of roots of the subsets of missing nodes from remote
1066 1068
1067 1069 If base dict is specified, assume that these nodes and their parents
1068 1070 exist on the remote side and that no child of a node of base exists
1069 1071 in both remote and self.
1070 1072 Furthermore base will be updated to include the nodes that exist
1071 1073 in both self and remote but none of whose children exists in both.
1072 1074 If a list of heads is specified, return only nodes which are heads
1073 1075 or ancestors of these heads.
1074 1076
1075 1077 All the ancestors of base are in self and in remote.
1076 1078 All the descendants of the list returned are missing in self.
1077 1079 (and so we know that the rest of the nodes are missing in remote, see
1078 1080 outgoing)
1079 1081 """
1080 1082 m = self.changelog.nodemap
1081 1083 search = []
1082 1084 fetch = {}
1083 1085 seen = {}
1084 1086 seenbranch = {}
1085 1087 if base == None:
1086 1088 base = {}
1087 1089
1088 1090 if not heads:
1089 1091 heads = remote.heads()
1090 1092
1091 1093 if self.changelog.tip() == nullid:
1092 1094 base[nullid] = 1
1093 1095 if heads != [nullid]:
1094 1096 return [nullid]
1095 1097 return []
1096 1098
1097 1099 # assume we're closer to the tip than the root
1098 1100 # and start by examining the heads
1099 1101 self.ui.status(_("searching for changes\n"))
1100 1102
1101 1103 unknown = []
1102 1104 for h in heads:
1103 1105 if h not in m:
1104 1106 unknown.append(h)
1105 1107 else:
1106 1108 base[h] = 1
1107 1109
1108 1110 if not unknown:
1109 1111 return []
1110 1112
1111 1113 req = dict.fromkeys(unknown)
1112 1114 reqcnt = 0
1113 1115
1114 1116 # search through remote branches
1115 1117 # a 'branch' here is a linear segment of history, with four parts:
1116 1118 # head, root, first parent, second parent
1117 1119 # (a branch always has two parents (or none) by definition)
1118 1120 unknown = remote.branches(unknown)
1119 1121 while unknown:
1120 1122 r = []
1121 1123 while unknown:
1122 1124 n = unknown.pop(0)
1123 1125 if n[0] in seen:
1124 1126 continue
1125 1127
1126 1128 self.ui.debug(_("examining %s:%s\n")
1127 1129 % (short(n[0]), short(n[1])))
1128 1130 if n[0] == nullid: # found the end of the branch
1129 1131 pass
1130 1132 elif n in seenbranch:
1131 1133 self.ui.debug(_("branch already found\n"))
1132 1134 continue
1133 1135 elif n[1] and n[1] in m: # do we know the base?
1134 1136 self.ui.debug(_("found incomplete branch %s:%s\n")
1135 1137 % (short(n[0]), short(n[1])))
1136 1138 search.append(n) # schedule branch range for scanning
1137 1139 seenbranch[n] = 1
1138 1140 else:
1139 1141 if n[1] not in seen and n[1] not in fetch:
1140 1142 if n[2] in m and n[3] in m:
1141 1143 self.ui.debug(_("found new changeset %s\n") %
1142 1144 short(n[1]))
1143 1145 fetch[n[1]] = 1 # earliest unknown
1144 1146 for p in n[2:4]:
1145 1147 if p in m:
1146 1148 base[p] = 1 # latest known
1147 1149
1148 1150 for p in n[2:4]:
1149 1151 if p not in req and p not in m:
1150 1152 r.append(p)
1151 1153 req[p] = 1
1152 1154 seen[n[0]] = 1
1153 1155
1154 1156 if r:
1155 1157 reqcnt += 1
1156 1158 self.ui.debug(_("request %d: %s\n") %
1157 1159 (reqcnt, " ".join(map(short, r))))
1158 1160 for p in xrange(0, len(r), 10):
1159 1161 for b in remote.branches(r[p:p+10]):
1160 1162 self.ui.debug(_("received %s:%s\n") %
1161 1163 (short(b[0]), short(b[1])))
1162 1164 unknown.append(b)
1163 1165
1164 1166 # do binary search on the branches we found
1165 1167 while search:
1166 1168 n = search.pop(0)
1167 1169 reqcnt += 1
1168 1170 l = remote.between([(n[0], n[1])])[0]
1169 1171 l.append(n[1])
1170 1172 p = n[0]
1171 1173 f = 1
1172 1174 for i in l:
1173 1175 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1174 1176 if i in m:
1175 1177 if f <= 2:
1176 1178 self.ui.debug(_("found new branch changeset %s\n") %
1177 1179 short(p))
1178 1180 fetch[p] = 1
1179 1181 base[i] = 1
1180 1182 else:
1181 1183 self.ui.debug(_("narrowed branch search to %s:%s\n")
1182 1184 % (short(p), short(i)))
1183 1185 search.append((p, i))
1184 1186 break
1185 1187 p, f = i, f * 2
1186 1188
1187 1189 # sanity check our fetch list
1188 1190 for f in fetch.keys():
1189 1191 if f in m:
1190 1192 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1191 1193
1192 1194 if base.keys() == [nullid]:
1193 1195 if force:
1194 1196 self.ui.warn(_("warning: repository is unrelated\n"))
1195 1197 else:
1196 1198 raise util.Abort(_("repository is unrelated"))
1197 1199
1198 1200 self.ui.debug(_("found new changesets starting at ") +
1199 1201 " ".join([short(f) for f in fetch]) + "\n")
1200 1202
1201 1203 self.ui.debug(_("%d total queries\n") % reqcnt)
1202 1204
1203 1205 return fetch.keys()
1204 1206
1205 1207 def findoutgoing(self, remote, base=None, heads=None, force=False):
1206 1208 """Return list of nodes that are roots of subsets not in remote
1207 1209
1208 1210 If base dict is specified, assume that these nodes and their parents
1209 1211 exist on the remote side.
1210 1212 If a list of heads is specified, return only nodes which are heads
1211 1213 or ancestors of these heads, and return a second element which
1212 1214 contains all remote heads which get new children.
1213 1215 """
1214 1216 if base == None:
1215 1217 base = {}
1216 1218 self.findincoming(remote, base, heads, force=force)
1217 1219
1218 1220 self.ui.debug(_("common changesets up to ")
1219 1221 + " ".join(map(short, base.keys())) + "\n")
1220 1222
1221 1223 remain = dict.fromkeys(self.changelog.nodemap)
1222 1224
1223 1225 # prune everything remote has from the tree
1224 1226 del remain[nullid]
1225 1227 remove = base.keys()
1226 1228 while remove:
1227 1229 n = remove.pop(0)
1228 1230 if n in remain:
1229 1231 del remain[n]
1230 1232 for p in self.changelog.parents(n):
1231 1233 remove.append(p)
1232 1234
1233 1235 # find every node whose parents have been pruned
1234 1236 subset = []
1235 1237 # find every remote head that will get new children
1236 1238 updated_heads = {}
1237 1239 for n in remain:
1238 1240 p1, p2 = self.changelog.parents(n)
1239 1241 if p1 not in remain and p2 not in remain:
1240 1242 subset.append(n)
1241 1243 if heads:
1242 1244 if p1 in heads:
1243 1245 updated_heads[p1] = True
1244 1246 if p2 in heads:
1245 1247 updated_heads[p2] = True
1246 1248
1247 1249 # this is the set of all roots we have to push
1248 1250 if heads:
1249 1251 return subset, updated_heads.keys()
1250 1252 else:
1251 1253 return subset
1252 1254
1253 1255 def pull(self, remote, heads=None, force=False, lock=None):
1254 1256 mylock = False
1255 1257 if not lock:
1256 1258 lock = self.lock()
1257 1259 mylock = True
1258 1260
1259 1261 try:
1260 1262 fetch = self.findincoming(remote, force=force)
1261 1263 if fetch == [nullid]:
1262 1264 self.ui.status(_("requesting all changes\n"))
1263 1265
1264 1266 if not fetch:
1265 1267 self.ui.status(_("no changes found\n"))
1266 1268 return 0
1267 1269
1268 1270 if heads is None:
1269 1271 cg = remote.changegroup(fetch, 'pull')
1270 1272 else:
1271 1273 if 'changegroupsubset' not in remote.capabilities:
1272 1274 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1273 1275 cg = remote.changegroupsubset(fetch, heads, 'pull')
1274 1276 return self.addchangegroup(cg, 'pull', remote.url())
1275 1277 finally:
1276 1278 if mylock:
1277 1279 lock.release()
1278 1280
1279 1281 def push(self, remote, force=False, revs=None):
1280 1282 # there are two ways to push to remote repo:
1281 1283 #
1282 1284 # addchangegroup assumes local user can lock remote
1283 1285 # repo (local filesystem, old ssh servers).
1284 1286 #
1285 1287 # unbundle assumes local user cannot lock remote repo (new ssh
1286 1288 # servers, http servers).
1287 1289
1288 1290 if remote.capable('unbundle'):
1289 1291 return self.push_unbundle(remote, force, revs)
1290 1292 return self.push_addchangegroup(remote, force, revs)
1291 1293
1292 1294 def prepush(self, remote, force, revs):
1293 1295 base = {}
1294 1296 remote_heads = remote.heads()
1295 1297 inc = self.findincoming(remote, base, remote_heads, force=force)
1296 1298 if not force and inc:
1297 1299 self.ui.warn(_("abort: unsynced remote changes!\n"))
1298 1300 self.ui.status(_("(did you forget to sync?"
1299 1301 " use push -f to force)\n"))
1300 1302 return None, 1
1301 1303
1302 1304 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1303 1305 if revs is not None:
1304 1306 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1305 1307 else:
1306 1308 bases, heads = update, self.changelog.heads()
1307 1309
1308 1310 if not bases:
1309 1311 self.ui.status(_("no changes found\n"))
1310 1312 return None, 1
1311 1313 elif not force:
1312 1314 # FIXME we don't properly detect creation of new heads
1313 1315 # in the push -r case, assume the user knows what he's doing
1314 1316 if not revs and len(remote_heads) < len(heads) \
1315 1317 and remote_heads != [nullid]:
1316 1318 self.ui.warn(_("abort: push creates new remote branches!\n"))
1317 1319 self.ui.status(_("(did you forget to merge?"
1318 1320 " use push -f to force)\n"))
1319 1321 return None, 1
1320 1322
1321 1323 if revs is None:
1322 1324 cg = self.changegroup(update, 'push')
1323 1325 else:
1324 1326 cg = self.changegroupsubset(update, revs, 'push')
1325 1327 return cg, remote_heads
1326 1328
1327 1329 def push_addchangegroup(self, remote, force, revs):
1328 1330 lock = remote.lock()
1329 1331
1330 1332 ret = self.prepush(remote, force, revs)
1331 1333 if ret[0] is not None:
1332 1334 cg, remote_heads = ret
1333 1335 return remote.addchangegroup(cg, 'push', self.url())
1334 1336 return ret[1]
1335 1337
1336 1338 def push_unbundle(self, remote, force, revs):
1337 1339 # local repo finds heads on server, finds out what revs it
1338 1340 # must push. once revs transferred, if server finds it has
1339 1341 # different heads (someone else won commit/push race), server
1340 1342 # aborts.
1341 1343
1342 1344 ret = self.prepush(remote, force, revs)
1343 1345 if ret[0] is not None:
1344 1346 cg, remote_heads = ret
1345 1347 if force: remote_heads = ['force']
1346 1348 return remote.unbundle(cg, remote_heads, 'push')
1347 1349 return ret[1]
1348 1350
1349 1351 def changegroupinfo(self, nodes):
1350 1352 self.ui.note(_("%d changesets found\n") % len(nodes))
1351 1353 if self.ui.debugflag:
1352 1354 self.ui.debug(_("List of changesets:\n"))
1353 1355 for node in nodes:
1354 1356 self.ui.debug("%s\n" % hex(node))
1355 1357
1356 1358 def changegroupsubset(self, bases, heads, source):
1357 1359 """This function generates a changegroup consisting of all the nodes
1358 1360 that are descendants of any of the bases, and ancestors of any of
1359 1361 the heads.
1360 1362
1361 1363 It is fairly complex as determining which filenodes and which
1362 1364 manifest nodes need to be included for the changeset to be complete
1363 1365 is non-trivial.
1364 1366
1365 1367 Another wrinkle is doing the reverse, figuring out which changeset in
1366 1368 the changegroup a particular filenode or manifestnode belongs to."""
1367 1369
1368 1370 self.hook('preoutgoing', throw=True, source=source)
1369 1371
1370 1372 # Set up some initial variables
1371 1373 # Make it easy to refer to self.changelog
1372 1374 cl = self.changelog
1373 1375 # msng is short for missing - compute the list of changesets in this
1374 1376 # changegroup.
1375 1377 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1376 1378 self.changegroupinfo(msng_cl_lst)
1377 1379 # Some bases may turn out to be superfluous, and some heads may be
1378 1380 # too. nodesbetween will return the minimal set of bases and heads
1379 1381 # necessary to re-create the changegroup.
1380 1382
1381 1383 # Known heads are the list of heads that it is assumed the recipient
1382 1384 # of this changegroup will know about.
1383 1385 knownheads = {}
1384 1386 # We assume that all parents of bases are known heads.
1385 1387 for n in bases:
1386 1388 for p in cl.parents(n):
1387 1389 if p != nullid:
1388 1390 knownheads[p] = 1
1389 1391 knownheads = knownheads.keys()
1390 1392 if knownheads:
1391 1393 # Now that we know what heads are known, we can compute which
1392 1394 # changesets are known. The recipient must know about all
1393 1395 # changesets required to reach the known heads from the null
1394 1396 # changeset.
1395 1397 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1396 1398 junk = None
1397 1399 # Transform the list into an ersatz set.
1398 1400 has_cl_set = dict.fromkeys(has_cl_set)
1399 1401 else:
1400 1402 # If there were no known heads, the recipient cannot be assumed to
1401 1403 # know about any changesets.
1402 1404 has_cl_set = {}
1403 1405
1404 1406 # Make it easy to refer to self.manifest
1405 1407 mnfst = self.manifest
1406 1408 # We don't know which manifests are missing yet
1407 1409 msng_mnfst_set = {}
1408 1410 # Nor do we know which filenodes are missing.
1409 1411 msng_filenode_set = {}
1410 1412
1411 1413 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1412 1414 junk = None
1413 1415
1414 1416 # A changeset always belongs to itself, so the changenode lookup
1415 1417 # function for a changenode is identity.
1416 1418 def identity(x):
1417 1419 return x
1418 1420
1419 1421 # A function generating function. Sets up an environment for the
1420 1422 # inner function.
1421 1423 def cmp_by_rev_func(revlog):
1422 1424 # Compare two nodes by their revision number in the environment's
1423 1425 # revision history. Since the revision number both represents the
1424 1426 # most efficient order to read the nodes in, and represents a
1425 1427 # topological sorting of the nodes, this function is often useful.
1426 1428 def cmp_by_rev(a, b):
1427 1429 return cmp(revlog.rev(a), revlog.rev(b))
1428 1430 return cmp_by_rev
1429 1431
1430 1432 # If we determine that a particular file or manifest node must be a
1431 1433 # node that the recipient of the changegroup will already have, we can
1432 1434 # also assume the recipient will have all the parents. This function
1433 1435 # prunes them from the set of missing nodes.
1434 1436 def prune_parents(revlog, hasset, msngset):
1435 1437 haslst = hasset.keys()
1436 1438 haslst.sort(cmp_by_rev_func(revlog))
1437 1439 for node in haslst:
1438 1440 parentlst = [p for p in revlog.parents(node) if p != nullid]
1439 1441 while parentlst:
1440 1442 n = parentlst.pop()
1441 1443 if n not in hasset:
1442 1444 hasset[n] = 1
1443 1445 p = [p for p in revlog.parents(n) if p != nullid]
1444 1446 parentlst.extend(p)
1445 1447 for n in hasset:
1446 1448 msngset.pop(n, None)
1447 1449
1448 1450 # This is a function generating function used to set up an environment
1449 1451 # for the inner function to execute in.
1450 1452 def manifest_and_file_collector(changedfileset):
1451 1453 # This is an information gathering function that gathers
1452 1454 # information from each changeset node that goes out as part of
1453 1455 # the changegroup. The information gathered is a list of which
1454 1456 # manifest nodes are potentially required (the recipient may
1455 1457 # already have them) and total list of all files which were
1456 1458 # changed in any changeset in the changegroup.
1457 1459 #
1458 1460 # We also remember the first changenode we saw any manifest
1459 1461 # referenced by so we can later determine which changenode 'owns'
1460 1462 # the manifest.
1461 1463 def collect_manifests_and_files(clnode):
1462 1464 c = cl.read(clnode)
1463 1465 for f in c[3]:
1464 1466 # This is to make sure we only have one instance of each
1465 1467 # filename string for each filename.
1466 1468 changedfileset.setdefault(f, f)
1467 1469 msng_mnfst_set.setdefault(c[0], clnode)
1468 1470 return collect_manifests_and_files
1469 1471
1470 1472 # Figure out which manifest nodes (of the ones we think might be part
1471 1473 # of the changegroup) the recipient must know about and remove them
1472 1474 # from the changegroup.
1473 1475 def prune_manifests():
1474 1476 has_mnfst_set = {}
1475 1477 for n in msng_mnfst_set:
1476 1478 # If a 'missing' manifest thinks it belongs to a changenode
1477 1479 # the recipient is assumed to have, obviously the recipient
1478 1480 # must have that manifest.
1479 1481 linknode = cl.node(mnfst.linkrev(n))
1480 1482 if linknode in has_cl_set:
1481 1483 has_mnfst_set[n] = 1
1482 1484 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1483 1485
1484 1486 # Use the information collected in collect_manifests_and_files to say
1485 1487 # which changenode any manifestnode belongs to.
1486 1488 def lookup_manifest_link(mnfstnode):
1487 1489 return msng_mnfst_set[mnfstnode]
1488 1490
1489 1491 # A function generating function that sets up the initial environment
1490 1492 # for the inner function.
1491 1493 def filenode_collector(changedfiles):
1492 1494 next_rev = [0]
1493 1495 # This gathers information from each manifestnode included in the
1494 1496 # changegroup about which filenodes the manifest node references
1495 1497 # so we can include those in the changegroup too.
1496 1498 #
1497 1499 # It also remembers which changenode each filenode belongs to. It
1498 1500 # does this by assuming that a filenode belongs to the changenode
1499 1501 # the first manifest that references it belongs to.
1500 1502 def collect_msng_filenodes(mnfstnode):
1501 1503 r = mnfst.rev(mnfstnode)
1502 1504 if r == next_rev[0]:
1503 1505 # If the last rev we looked at was the one just previous,
1504 1506 # we only need to see a diff.
1505 1507 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1506 1508 # For each line in the delta
1507 1509 for dline in delta.splitlines():
1508 1510 # get the filename and filenode for that line
1509 1511 f, fnode = dline.split('\0')
1510 1512 fnode = bin(fnode[:40])
1511 1513 f = changedfiles.get(f, None)
1512 1514 # And if the file is in the list of files we care
1513 1515 # about.
1514 1516 if f is not None:
1515 1517 # Get the changenode this manifest belongs to
1516 1518 clnode = msng_mnfst_set[mnfstnode]
1517 1519 # Create the set of filenodes for the file if
1518 1520 # there isn't one already.
1519 1521 ndset = msng_filenode_set.setdefault(f, {})
1520 1522 # And set the filenode's changelog node to the
1521 1523 # manifest's if it hasn't been set already.
1522 1524 ndset.setdefault(fnode, clnode)
1523 1525 else:
1524 1526 # Otherwise we need a full manifest.
1525 1527 m = mnfst.read(mnfstnode)
1526 1528 # For every file we care about.
1527 1529 for f in changedfiles:
1528 1530 fnode = m.get(f, None)
1529 1531 # If it's in the manifest
1530 1532 if fnode is not None:
1531 1533 # See comments above.
1532 1534 clnode = msng_mnfst_set[mnfstnode]
1533 1535 ndset = msng_filenode_set.setdefault(f, {})
1534 1536 ndset.setdefault(fnode, clnode)
1535 1537 # Remember the revision we hope to see next.
1536 1538 next_rev[0] = r + 1
1537 1539 return collect_msng_filenodes
1538 1540
1539 1541 # We have a list of filenodes we think we need for a file, let's remove
1540 1542 # all those we know the recipient must have.
1541 1543 def prune_filenodes(f, filerevlog):
1542 1544 msngset = msng_filenode_set[f]
1543 1545 hasset = {}
1544 1546 # If a 'missing' filenode thinks it belongs to a changenode we
1545 1547 # assume the recipient must have, then the recipient must have
1546 1548 # that filenode.
1547 1549 for n in msngset:
1548 1550 clnode = cl.node(filerevlog.linkrev(n))
1549 1551 if clnode in has_cl_set:
1550 1552 hasset[n] = 1
1551 1553 prune_parents(filerevlog, hasset, msngset)
1552 1554
1553 1555 # A function generating function that sets up a context for the
1554 1556 # inner function.
1555 1557 def lookup_filenode_link_func(fname):
1556 1558 msngset = msng_filenode_set[fname]
1557 1559 # Lookup the changenode the filenode belongs to.
1558 1560 def lookup_filenode_link(fnode):
1559 1561 return msngset[fnode]
1560 1562 return lookup_filenode_link
1561 1563
1562 1564 # Now that we have all these utility functions to help out and
1563 1565 # logically divide up the task, generate the group.
1564 1566 def gengroup():
1565 1567 # The set of changed files starts empty.
1566 1568 changedfiles = {}
1567 1569 # Create a changenode group generator that will call our functions
1568 1570 # back to lookup the owning changenode and collect information.
1569 1571 group = cl.group(msng_cl_lst, identity,
1570 1572 manifest_and_file_collector(changedfiles))
1571 1573 for chnk in group:
1572 1574 yield chnk
1573 1575
1574 1576 # The list of manifests has been collected by the generator
1575 1577 # calling our functions back.
1576 1578 prune_manifests()
1577 1579 msng_mnfst_lst = msng_mnfst_set.keys()
1578 1580 # Sort the manifestnodes by revision number.
1579 1581 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1580 1582 # Create a generator for the manifestnodes that calls our lookup
1581 1583 # and data collection functions back.
1582 1584 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1583 1585 filenode_collector(changedfiles))
1584 1586 for chnk in group:
1585 1587 yield chnk
1586 1588
1587 1589 # These are no longer needed, dereference and toss the memory for
1588 1590 # them.
1589 1591 msng_mnfst_lst = None
1590 1592 msng_mnfst_set.clear()
1591 1593
1592 1594 changedfiles = changedfiles.keys()
1593 1595 changedfiles.sort()
1594 1596 # Go through all our files in order sorted by name.
1595 1597 for fname in changedfiles:
1596 1598 filerevlog = self.file(fname)
1597 1599 # Toss out the filenodes that the recipient isn't really
1598 1600 # missing.
1599 1601 if msng_filenode_set.has_key(fname):
1600 1602 prune_filenodes(fname, filerevlog)
1601 1603 msng_filenode_lst = msng_filenode_set[fname].keys()
1602 1604 else:
1603 1605 msng_filenode_lst = []
1604 1606 # If any filenodes are left, generate the group for them,
1605 1607 # otherwise don't bother.
1606 1608 if len(msng_filenode_lst) > 0:
1607 1609 yield changegroup.genchunk(fname)
1608 1610 # Sort the filenodes by their revision number.
1609 1611 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1610 1612 # Create a group generator and only pass in a changenode
1611 1613 # lookup function, as we don't need to collect any
1612 1614 # information from the filenodes.
1613 1615 group = filerevlog.group(msng_filenode_lst,
1614 1616 lookup_filenode_link_func(fname))
1615 1617 for chnk in group:
1616 1618 yield chnk
1617 1619 if msng_filenode_set.has_key(fname):
1618 1620 # Don't need this anymore, toss it to free memory.
1619 1621 del msng_filenode_set[fname]
1620 1622 # Signal that no more groups are left.
1621 1623 yield changegroup.closechunk()
1622 1624
1623 1625 if msng_cl_lst:
1624 1626 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1625 1627
1626 1628 return util.chunkbuffer(gengroup())
1627 1629
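The chunks yielded by gengroup() are framed by helpers in the changegroup
module: genchunk() length-prefixes a payload and closechunk() emits a
zero-length terminator. A sketch of that framing as I read it for this era
(verify against changegroup.py before relying on it):

import struct

def genchunk(data):
    # 4-byte big-endian length, counting the header itself, then the payload.
    return struct.pack(">l", len(data) + 4) + data

def closechunk():
    # A zero length marks the end of a group.
    return struct.pack(">l", 0)

assert genchunk(b"abc") == b"\x00\x00\x00\x07abc"
assert closechunk() == b"\x00\x00\x00\x00"
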
1628 1630 def changegroup(self, basenodes, source):
1629 1631 """Generate a changegroup of all nodes that we have that a recipient
1630 1632 doesn't.
1631 1633
1632 1634 This is much easier than the previous function as we can assume that
1633 1635 the recipient has any changenode we aren't sending them."""
1634 1636
1635 1637 self.hook('preoutgoing', throw=True, source=source)
1636 1638
1637 1639 cl = self.changelog
1638 1640 nodes = cl.nodesbetween(basenodes, None)[0]
1639 1641 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1640 1642 self.changegroupinfo(nodes)
1641 1643
1642 1644 def identity(x):
1643 1645 return x
1644 1646
1645 1647 def gennodelst(revlog):
1646 1648 for r in xrange(0, revlog.count()):
1647 1649 n = revlog.node(r)
1648 1650 if revlog.linkrev(n) in revset:
1649 1651 yield n
1650 1652
1651 1653 def changed_file_collector(changedfileset):
1652 1654 def collect_changed_files(clnode):
1653 1655 c = cl.read(clnode)
1654 1656 for fname in c[3]:
1655 1657 changedfileset[fname] = 1
1656 1658 return collect_changed_files
1657 1659
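The c[3] above indexes into the changelog entry returned by cl.read(); as best
I recall, the tuple in this era starts (manifest node, user, (time, tz), files,
description), so index 3 is the list of files the changeset touched. A
hypothetical entry to make the indexing concrete:

# Hypothetical tuple shaped like cl.read(node)'s return value (assumed layout).
entry = ('mnode', 'user@example.com', (0, 0), ['foo.c', 'bar.c'], 'commit msg')
assert entry[3] == ['foo.c', 'bar.c']   # the files list iterated above
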
1658 1660 def lookuprevlink_func(revlog):
1659 1661 def lookuprevlink(n):
1660 1662 return cl.node(revlog.linkrev(n))
1661 1663 return lookuprevlink
1662 1664
1663 1665 def gengroup():
1664 1666 # construct a list of all changed files
1665 1667 changedfiles = {}
1666 1668
1667 1669 for chnk in cl.group(nodes, identity,
1668 1670 changed_file_collector(changedfiles)):
1669 1671 yield chnk
1670 1672 changedfiles = changedfiles.keys()
1671 1673 changedfiles.sort()
1672 1674
1673 1675 mnfst = self.manifest
1674 1676 nodeiter = gennodelst(mnfst)
1675 1677 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1676 1678 yield chnk
1677 1679
1678 1680 for fname in changedfiles:
1679 1681 filerevlog = self.file(fname)
1680 1682 nodeiter = gennodelst(filerevlog)
1681 1683 nodeiter = list(nodeiter)
1682 1684 if nodeiter:
1683 1685 yield changegroup.genchunk(fname)
1684 1686 lookup = lookuprevlink_func(filerevlog)
1685 1687 for chnk in filerevlog.group(nodeiter, lookup):
1686 1688 yield chnk
1687 1689
1688 1690 yield changegroup.closechunk()
1689 1691
1690 1692 if nodes:
1691 1693 self.hook('outgoing', node=hex(nodes[0]), source=source)
1692 1694
1693 1695 return util.chunkbuffer(gengroup())
1694 1696
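Read together with addchangegroup() below, both generators emit the same fixed
stream shape: one changelog group, one manifest group, then a named section per
changed file, then a final empty chunk. A schematic walker, assuming the
length-prefixed framing sketched earlier (not the real changegroup API):

import struct

def getchunk(fp):
    # One chunk: 4-byte big-endian length (including the header), then payload.
    l = struct.unpack(">l", fp.read(4))[0]
    return fp.read(l - 4) if l > 4 else b""

def walk_changegroup(fp):
    for _ in range(2):          # first the changelog group, then the manifests
        while getchunk(fp):     # a group runs until its zero chunk
            pass
    while True:
        fname = getchunk(fp)    # each file section opens with its name...
        if not fname:           # ...and an empty name is the closing chunk
            break
        while getchunk(fp):     # ...followed by that file's revision chunks
            pass
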
1695 1697 def addchangegroup(self, source, srctype, url):
1696 1698 """add changegroup to repo.
1697 1699 returns the change in the number of heads, plus one."""
1698 1700
1699 1701 def csmap(x):
1700 1702 self.ui.debug(_("add changeset %s\n") % short(x))
1701 1703 return cl.count()
1702 1704
1703 1705 def revmap(x):
1704 1706 return cl.rev(x)
1705 1707
1706 1708 if not source:
1707 1709 return 0
1708 1710
1709 1711 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1710 1712
1711 1713 changesets = files = revisions = 0
1712 1714
1713 1715 tr = self.transaction()
1714 1716
1715 1717 # write changelog data to temp files so concurrent readers will not
1716 1718 # see an inconsistent view
1717 1719 cl = None
1718 1720 try:
1719 1721 cl = appendfile.appendchangelog(self.sopener,
1720 1722 self.changelog.version)
1721 1723
1722 1724 oldheads = len(cl.heads())
1723 1725
1724 1726 # pull off the changeset group
1725 1727 self.ui.status(_("adding changesets\n"))
1726 1728 cor = cl.count() - 1
1727 1729 chunkiter = changegroup.chunkiter(source)
1728 1730 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1729 1731 raise util.Abort(_("received changelog group is empty"))
1730 1732 cnr = cl.count() - 1
1731 1733 changesets = cnr - cor
1732 1734
1733 1735 # pull off the manifest group
1734 1736 self.ui.status(_("adding manifests\n"))
1735 1737 chunkiter = changegroup.chunkiter(source)
1736 1738 # no need to check for an empty manifest group here:
1737 1739 # if the result of merging revisions 1 and 2 is the same in
1738 1740 # revisions 3 and 4, no new manifest will be created and the
1739 1741 # manifest group will be empty during the pull
1740 1742 self.manifest.addgroup(chunkiter, revmap, tr)
1741 1743
1742 1744 # process the files
1743 1745 self.ui.status(_("adding file changes\n"))
1744 1746 while 1:
1745 1747 f = changegroup.getchunk(source)
1746 1748 if not f:
1747 1749 break
1748 1750 self.ui.debug(_("adding %s revisions\n") % f)
1749 1751 fl = self.file(f)
1750 1752 o = fl.count()
1751 1753 chunkiter = changegroup.chunkiter(source)
1752 1754 if fl.addgroup(chunkiter, revmap, tr) is None:
1753 1755 raise util.Abort(_("received file revlog group is empty"))
1754 1756 revisions += fl.count() - o
1755 1757 files += 1
1756 1758
1757 1759 cl.writedata()
1758 1760 finally:
1759 1761 if cl:
1760 1762 cl.cleanup()
1761 1763
1762 1764 # make changelog see real files again
1763 1765 self.changelog = changelog.changelog(self.sopener,
1764 1766 self.changelog.version)
1765 1767 self.changelog.checkinlinesize(tr)
1766 1768
1767 1769 newheads = len(self.changelog.heads())
1768 1770 heads = ""
1769 1771 if oldheads and newheads != oldheads:
1770 1772 heads = _(" (%+d heads)") % (newheads - oldheads)
1771 1773
1772 1774 self.ui.status(_("added %d changesets"
1773 1775 " with %d changes to %d files%s\n")
1774 1776 % (changesets, revisions, files, heads))
1775 1777
1776 1778 if changesets > 0:
1777 1779 self.hook('pretxnchangegroup', throw=True,
1778 1780 node=hex(self.changelog.node(cor+1)), source=srctype,
1779 1781 url=url)
1780 1782
1781 1783 tr.close()
1782 1784
1783 1785 if changesets > 0:
1784 1786 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1785 1787 source=srctype, url=url)
1786 1788
1787 1789 for i in xrange(cor + 1, cnr + 1):
1788 1790 self.hook("incoming", node=hex(self.changelog.node(i)),
1789 1791 source=srctype, url=url)
1790 1792
1791 1793 return newheads - oldheads + 1
1792 1794
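The "+ 1" in the return value lets a caller tell the possible outcomes apart
from one integer; this is my reading of the convention, not a documented API:

# Decoding addchangegroup()'s return value (newheads - oldheads + 1).
def describe(ret):
    if ret == 0:
        return "nothing added (empty source)"   # the early 'return 0' above
    if ret == 1:
        return "changesets added, head count unchanged"
    if ret > 1:
        return "%d new head(s)" % (ret - 1)
    return "%d head(s) merged away" % (1 - ret)

assert describe(3) == "2 new head(s)"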
1793 1795
1794 1796 def stream_in(self, remote):
1795 1797 fp = remote.stream_out()
1796 1798 l = fp.readline()
1797 1799 try:
1798 1800 resp = int(l)
1799 1801 except ValueError:
1800 1802 raise util.UnexpectedOutput(
1801 1803 _('Unexpected response from remote server:'), l)
1802 1804 if resp != 0:
1803 1805 raise util.Abort(_('operation forbidden by server'))
1804 1806 self.ui.status(_('streaming all changes\n'))
1805 1807 l = fp.readline()
1806 1808 try:
1807 1809 total_files, total_bytes = map(int, l.split(' ', 1))
1808 1810 except (ValueError, TypeError):
1809 1811 raise util.UnexpectedOutput(
1810 1812 _('Unexpected response from remote server:'), l)
1811 1813 self.ui.status(_('%d files to transfer, %s of data\n') %
1812 1814 (total_files, util.bytecount(total_bytes)))
1813 1815 start = time.time()
1814 1816 for i in xrange(total_files):
1815 1817 l = fp.readline()
1816 1818 try:
1817 1819 name, size = l.split('\0', 1)
1818 1820 size = int(size)
1819 1821 except (ValueError, TypeError):
1820 1822 raise util.UnexpectedOutput(
1821 1823 _('Unexpected response from remote server:'), l)
1822 1824 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1823 1825 ofp = self.sopener(name, 'w')
1824 1826 for chunk in util.filechunkiter(fp, limit=size):
1825 1827 ofp.write(chunk)
1826 1828 ofp.close()
1827 1829 elapsed = time.time() - start
1828 1830 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1829 1831 (util.bytecount(total_bytes), elapsed,
1830 1832 util.bytecount(total_bytes / elapsed)))
1831 1833 self.reload()
1832 1834 return len(self.heads()) + 1
1833 1835
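The parsing above implies the wire format that stream_out produces; this is
reconstructed purely from the client side, with emit_file() as a hypothetical
server-side counterpart:

# Stream layout as stream_in() expects it:
#   "0\n"                            status line: 0 = OK, anything else forbidden
#   "<total_files> <total_bytes>\n"  summary line
# then, total_files times:
#   "<name>\0<size>\n"               header: store path, NUL, byte count
#   <size raw bytes>                 file body, copied verbatim
def emit_file(fp, name, data):
    fp.write("%s\0%d\n" % (name, len(data)))
    fp.write(data)
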
1834 1836 def clone(self, remote, heads=[], stream=False):
1835 1837 '''clone remote repository.
1836 1838
1837 1839 keyword arguments:
1838 1840 heads: list of revs to clone (forces use of pull)
1839 1841 stream: use streaming clone if possible'''
1840 1842
1841 1843 # now, all clients that can request uncompressed clones can
1842 1844 # read repo formats supported by all servers that can serve
1843 1845 # them.
1844 1846
1845 1847 # if revlog format changes, client will have to check version
1846 1848 # and format flags on "stream" capability, and use
1847 1849 # uncompressed only if compatible.
1848 1850
1849 1851 if stream and not heads and remote.capable('stream'):
1850 1852 return self.stream_in(remote)
1851 1853 return self.pull(remote, heads)
1852 1854
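Hypothetical call sites for the above (the repo objects are assumed, not
defined here): streaming only happens when no heads are requested and the
server advertises the capability; otherwise the pull path runs.

repo.clone(remote, stream=True)        # streamed copy if the server allows it
repo.clone(remote, heads=[somenode])   # explicit heads always force a pull
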
1853 1855 # used to avoid circular references so destructors work
1854 1856 def aftertrans(base):
1855 1857 p = base
1856 1858 def a():
1857 1859 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1858 1860 util.rename(os.path.join(p, "journal.dirstate"),
1859 1861 os.path.join(p, "undo.dirstate"))
1860 1862 return a
1861 1863
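Note that the returned callback closes over only the path string, never the
repository object, which is what keeps the reference cycle from forming. How
it might be wired up (an assumption; the actual hookup lives in the
transaction code elsewhere in this file):

# Hypothetical wiring: the transaction calls this after a successful close.
after = aftertrans("/path/to/repo/.hg")
after()   # journal -> undo, journal.dirstate -> undo.dirstate
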
1862 1864 def instance(ui, path, create):
1863 1865 return localrepository(ui, util.drop_scheme('file', path), create)
1864 1866
1865 1867 def islocal(path):
1866 1868 return True