##// END OF EJS Templates
Automatically update the branch cache when the tip changes
Alexis S. L. Carvalho -
r6121:7336aeff default
parent child Browse files
Show More
@@ -1,2105 +1,2117
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at *path*.

        parentui: ui object that configuration is inherited from.
        Raises repo.RepoError when the repository is missing, already
        exists (while creating), or declares an unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # file access under .hg/
        self.wopener = util.opener(self.root)   # file access in the working dir

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo; anything
                # else is a real error
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, tracked filenames are
        # encoded and revlogs live under .hg/store; otherwise under .hg/
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # lazily-filled caches; see invalidate() which resets the same set
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None # tip node branchcache is valid for
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
102 103
    def __getattr__(self, name):
        # Lazily create the expensive changelog/manifest/dirstate objects on
        # first access; once assigned, normal attribute lookup bypasses this.
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # opening the changelog fixes the revlog version used for the
            # other revlogs opened through sopener
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch self.changelog first so defversion is set (see above)
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
117 118
118 119 def url(self):
119 120 return 'file:' + self.root
120 121
    def hook(self, name, throw=False, **args):
        # Run the named hook via the generic runner, passing this repo's ui
        # and the repo itself; throw=True raises on hook failure.
        return hook.hook(self.ui, self, name, throw, **args)
123 124
124 125 tag_disallowed = ':\r\n'
125 126
    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        """Low-level tagging helper shared by tag() and rawcommit-style
        callers.

        local=True writes to .hg/localtags and returns None; otherwise the
        tag is appended to .hgtags and committed, returning the new
        changeset node. parent, if given, is the commit parent to use
        (and implies the dirstate is not consulted).
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            # append one "node name" line, making sure the previous content
            # ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                # start from the .hgtags content of the requested parent
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
185 186
186 187 def tag(self, name, node, message, local, user, date):
187 188 '''tag a revision with a symbolic name.
188 189
189 190 if local is True, the tag is stored in a per-repository file.
190 191 otherwise, it is stored in the .hgtags file, and a new
191 192 changeset is committed with the change.
192 193
193 194 keyword arguments:
194 195
195 196 local: whether to store tag in non-version-controlled file
196 197 (default False)
197 198
198 199 message: commit message to use if committing
199 200
200 201 user: name of user to use if committing
201 202
202 203 date: date tuple to use if committing'''
203 204
204 205 for x in self.status()[:5]:
205 206 if '.hgtags' in x:
206 207 raise util.Abort(_('working copy of .hgtags is changed '
207 208 '(please commit .hgtags manually)'))
208 209
209 210
210 211 self._tag(name, node, message, local, user, date)
211 212
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}   # tag name -> (node, list of superseded nodes)
        tagtypes = {}     # tag name -> "global" or "local"

        def readtags(lines, fn, tagtype):
            # Parse "hexnode tagname" lines from one tags source (fn is
            # used only for warning messages) and merge into globaltags.
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # later entries in the same file override earlier ones;
                # remember the overridden nodes as this tag's history
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        # build the final name->node cache, dropping tags that resolve to
        # nullid (deleted tags), and always mapping 'tip'
        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
296 297
297 298 def tagtype(self, tagname):
298 299 '''
299 300 return the type of the given tag. result can be:
300 301
301 302 'local' : a local tag
302 303 'global' : a global tag
303 304 None : tag does not exist
304 305 '''
305 306
306 307 self.tags()
307 308
308 309 return self._tagstypecache.get(tagname)
309 310
    def _hgtagsnodes(self):
        # Return a list of (rev, node, fnode) for heads whose .hgtags file
        # should be read, oldest first.  When several heads share the same
        # .hgtags filenode, only the most recent occurrence is kept.
        heads = self.heads()
        heads.reverse()
        last = {}   # fnode -> index of its latest entry in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # drop the earlier entry with the same .hgtags content
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
327 328
328 329 def tagslist(self):
329 330 '''return a list of tags ordered by revision'''
330 331 l = []
331 332 for t, n in self.tags().items():
332 333 try:
333 334 r = self.changelog.rev(n)
334 335 except:
335 336 r = -2 # sort to the beginning of the list if unknown
336 337 l.append((r, t, n))
337 338 l.sort()
338 339 return [(t, n) for r, t, n in l]
339 340
340 341 def nodetags(self, node):
341 342 '''return the tags associated with a node'''
342 343 if not self.nodetagscache:
343 344 self.nodetagscache = {}
344 345 for t, n in self.tags().items():
345 346 self.nodetagscache.setdefault(n, []).append(t)
346 347 return self.nodetagscache.get(node, [])
347 348
    def _branchtags(self, partial, lrev):
        # Bring the branch cache 'partial' (valid through revision lrev)
        # up to date with the current tip, persisting it when it changed.
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
355 356
    def branchtags(self):
        # Return a dict of branch name -> tip-most node, refreshing the
        # in-memory cache whenever the repository tip has moved.
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. after a strip/rollback):
            # reload from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # the tip simply advanced: reuse the in-memory UTF-8 cache
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
370 382
    def _readbranchcache(self):
        # Read .hg/branch.cache and return (partial, last, lrev) where
        # partial maps branch name -> node, and last/lrev identify the
        # tip the cache was valid for.  Any problem yields an empty cache.
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<hex tip node> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<hex node> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is not fatal; fall back to an empty one
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
398 410
    def _writebranchcache(self, branches, tip, tiprev):
        # Persist the branch cache atomically; failure to write is
        # deliberately ignored (the cache is only an optimization).
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
408 420
409 421 def _updatebranchcache(self, partial, start, end):
410 422 for r in xrange(start, end):
411 423 c = self.changectx(r)
412 424 b = c.branch()
413 425 partial[b] = c.node()
414 426
415 427 def lookup(self, key):
416 428 if key == '.':
417 429 key, second = self.dirstate.parents()
418 430 if key == nullid:
419 431 raise repo.RepoError(_("no revision checked out"))
420 432 if second != nullid:
421 433 self.ui.warn(_("warning: working directory has two parents, "
422 434 "tag '.' uses the first\n"))
423 435 elif key == 'null':
424 436 return nullid
425 437 n = self.changelog._match(key)
426 438 if n:
427 439 return n
428 440 if key in self.tags():
429 441 return self.tags()[key]
430 442 if key in self.branchtags():
431 443 return self.branchtags()[key]
432 444 n = self.changelog._partialmatch(key)
433 445 if n:
434 446 return n
435 447 try:
436 448 if len(key) == 20:
437 449 key = hex(key)
438 450 except:
439 451 pass
440 452 raise repo.RepoError(_("unknown revision '%s'") % key)
441 453
442 454 def dev(self):
443 455 return os.lstat(self.path).st_dev
444 456
445 457 def local(self):
446 458 return True
447 459
448 460 def join(self, f):
449 461 return os.path.join(self.path, f)
450 462
451 463 def sjoin(self, f):
452 464 f = self.encodefn(f)
453 465 return os.path.join(self.spath, f)
454 466
455 467 def wjoin(self, f):
456 468 return os.path.join(self.root, f)
457 469
    def file(self, f):
        # Return the filelog for tracked file f; a single leading '/'
        # (repo-root-relative form) is stripped.
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
462 474
463 475 def changectx(self, changeid=None):
464 476 return context.changectx(self, changeid)
465 477
466 478 def workingctx(self):
467 479 return context.workingctx(self)
468 480
469 481 def parents(self, changeid=None):
470 482 '''
471 483 get list of changectxs for parents of changeid or working directory
472 484 '''
473 485 if changeid is None:
474 486 pl = self.dirstate.parents()
475 487 else:
476 488 n = self.changelog.lookup(changeid)
477 489 pl = self.changelog.parents(n)
478 490 if pl[1] == nullid:
479 491 return [self.changectx(pl[0])]
480 492 return [self.changectx(pl[0]), self.changectx(pl[1])]
481 493
482 494 def filectx(self, path, changeid=None, fileid=None):
483 495 """changeid can be a changeset revision, node, or tag.
484 496 fileid can be a file revision or node."""
485 497 return context.filectx(self, path, changeid, fileid)
486 498
487 499 def getcwd(self):
488 500 return self.dirstate.getcwd()
489 501
490 502 def pathto(self, f, cwd=None):
491 503 return self.dirstate.pathto(f, cwd)
492 504
493 505 def wfile(self, f, mode='r'):
494 506 return self.wopener(f, mode)
495 507
496 508 def _link(self, f):
497 509 return os.path.islink(self.wjoin(f))
498 510
    def _filter(self, filter, filename, data):
        # Run *data* through the configured filter chain ('encode' or
        # 'decode' config section); the first pattern matching *filename*
        # wins.  Compiled (pattern, fn, params) triples are cached per
        # filter name in self.filterpats.
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # dispatches to that Python filter instead of a shell pipe
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        # Register a named Python data filter usable from encode/decode
        # config entries (see _filter above).
        self._datafilters[name] = filter
530 542
531 543 def wread(self, filename):
532 544 if self._link(filename):
533 545 data = os.readlink(self.wjoin(filename))
534 546 else:
535 547 data = self.wopener(filename, 'r').read()
536 548 return self._filter("encode", filename, data)
537 549
    def wwrite(self, filename, data, flags):
        # Write *data* (after 'decode' filtering) to the working directory
        # and apply the exec/link flags.
        data = self._filter("decode", filename, data)
        try:
            # remove any existing file first (it may be a symlink, or have
            # the wrong mode); a missing file is fine
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        # Return *data* as it would be written by wwrite, without touching
        # the filesystem.
        return self._filter("decode", filename, data)
549 561
    def transaction(self):
        # Return a (possibly nested) transaction; saves dirstate and branch
        # so a rollback can restore them.
        if self._transref and self._transref():
            # a transaction is already live: nest inside it
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on success the journal files are renamed to undo files, enabling
        # a later rollback()
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # hold only a weak reference so an abandoned transaction aborts
        # when the last strong reference goes away
        self._transref = weakref.ref(tr)
        return tr
575 587
    def recover(self):
        # Roll back an interrupted transaction (leftover journal).
        # Returns True if something was recovered.
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            # drop the last reference so the lock is released promptly
            del l
589 601
    def rollback(self):
        # Undo the last transaction: restore the store from the undo
        # journal plus the saved dirstate and branch.
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                # drop all caches now that on-disk state has changed
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            # release both locks by dropping the last references
            del lock, wlock
612 624
    def invalidate(self):
        # Drop every lazily-created object and cache so that subsequent
        # accesses reread state from disk (mirror of the cache set
        # initialized in __init__).
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None
622 635
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        # Acquire the named lock file.  With wait=True, retry (with a
        # configurable timeout) when another process holds it; otherwise
        # re-raise lock.LockHeld immediately.
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
637 650
    def lock(self, wait=True):
        # Store lock; only a weak reference is kept so dropping the
        # returned object releases the lock.
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        # Working-directory lock; writes the dirstate on release and
        # invalidates it on acquisition.
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
656 669
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for fn (or the existing one when the
        file is unchanged); appends fn to changelist when a new revision
        is actually created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
718 731
719 732 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
720 733 if p1 is None:
721 734 p1, p2 = self.dirstate.parents()
722 735 return self.commit(files=files, text=text, user=user, date=date,
723 736 p1=p1, p2=p2, extra=extra, empty_ok=True)
724 737
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node (or None when
        nothing changed).

        With p1=None the commit is built from the dirstate; otherwise
        (rawcommit path) the given parents are used and the dirstate is
        only updated when it already points at p1.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            # determine the sets of files to check in and to remove
            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # reject branch names that are not valid UTF-8
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            # pass only a weak proxy so the transaction can be garbage
            # collected (and aborted) if commit fails partway
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    # in rawcommit mode a vanished file becomes a removal;
                    # in dirstate mode it is a hard error
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                # build the skeleton shown in the user's editor
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # normalize the message: strip trailing whitespace and
                # leading blank lines, reject an empty result
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            # the tip moved: refresh the branch cache if one is loaded
            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            # drop locks/transaction references in reverse order
            del tr, lock, wlock
904 917
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed changeset: match against its manifest
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was not found in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            # walking the working directory: delegate to the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
946 959
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, getnode):
            # full content comparison of working-dir file fn against the
            # stored revision looked up via getnode
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # compareworking: comparing the working dir against its own parent
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            # content is unchanged: only the stat info was
                            # stale, remember it so the dirstate can be fixed
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # best effort: status must work without the
                                # wlock, so a failed lock just skips the fixup
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # empty node means "compare by content", see fcmp below
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    # in both: modified if flags differ or content differs
                    # (a "" node in mf2 forces a content comparison)
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # remove from mf1 so that what remains is "removed"
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1058 1071
1059 1072 def add(self, list):
1060 1073 wlock = self.wlock()
1061 1074 try:
1062 1075 rejected = []
1063 1076 for f in list:
1064 1077 p = self.wjoin(f)
1065 1078 try:
1066 1079 st = os.lstat(p)
1067 1080 except:
1068 1081 self.ui.warn(_("%s does not exist!\n") % f)
1069 1082 rejected.append(f)
1070 1083 continue
1071 1084 if st.st_size > 10000000:
1072 1085 self.ui.warn(_("%s: files over 10MB may cause memory and"
1073 1086 " performance problems\n"
1074 1087 "(use 'hg revert %s' to unadd the file)\n")
1075 1088 % (f, f))
1076 1089 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1077 1090 self.ui.warn(_("%s not added: only files and symlinks "
1078 1091 "supported currently\n") % f)
1079 1092 rejected.append(p)
1080 1093 elif self.dirstate[f] in 'amn':
1081 1094 self.ui.warn(_("%s already tracked!\n") % f)
1082 1095 elif self.dirstate[f] == 'r':
1083 1096 self.dirstate.normallookup(f)
1084 1097 else:
1085 1098 self.dirstate.add(f)
1086 1099 return rejected
1087 1100 finally:
1088 1101 del wlock
1089 1102
1090 1103 def forget(self, list):
1091 1104 wlock = self.wlock()
1092 1105 try:
1093 1106 for f in list:
1094 1107 if self.dirstate[f] != 'a':
1095 1108 self.ui.warn(_("%s not added!\n") % f)
1096 1109 else:
1097 1110 self.dirstate.forget(f)
1098 1111 finally:
1099 1112 del wlock
1100 1113
1101 1114 def remove(self, list, unlink=False):
1102 1115 wlock = None
1103 1116 try:
1104 1117 if unlink:
1105 1118 for f in list:
1106 1119 try:
1107 1120 util.unlink(self.wjoin(f))
1108 1121 except OSError, inst:
1109 1122 if inst.errno != errno.ENOENT:
1110 1123 raise
1111 1124 wlock = self.wlock()
1112 1125 for f in list:
1113 1126 if unlink and os.path.exists(self.wjoin(f)):
1114 1127 self.ui.warn(_("%s still exists!\n") % f)
1115 1128 elif self.dirstate[f] == 'a':
1116 1129 self.dirstate.forget(f)
1117 1130 elif f not in self.dirstate:
1118 1131 self.ui.warn(_("%s not tracked!\n") % f)
1119 1132 else:
1120 1133 self.dirstate.remove(f)
1121 1134 finally:
1122 1135 del wlock
1123 1136
1124 1137 def undelete(self, list):
1125 1138 wlock = None
1126 1139 try:
1127 1140 manifests = [self.manifest.read(self.changelog.read(p)[0])
1128 1141 for p in self.dirstate.parents() if p != nullid]
1129 1142 wlock = self.wlock()
1130 1143 for f in list:
1131 1144 if self.dirstate[f] != 'r':
1132 1145 self.ui.warn("%s not removed!\n" % f)
1133 1146 else:
1134 1147 m = f in manifests[0] and manifests[0] or manifests[1]
1135 1148 t = self.file(f).read(m[f])
1136 1149 self.wwrite(f, t, m.flags(f))
1137 1150 self.dirstate.normal(f)
1138 1151 finally:
1139 1152 del wlock
1140 1153
1141 1154 def copy(self, source, dest):
1142 1155 wlock = None
1143 1156 try:
1144 1157 p = self.wjoin(dest)
1145 1158 if not (os.path.exists(p) or os.path.islink(p)):
1146 1159 self.ui.warn(_("%s does not exist!\n") % dest)
1147 1160 elif not (os.path.isfile(p) or os.path.islink(p)):
1148 1161 self.ui.warn(_("copy failed: %s is not a file or a "
1149 1162 "symbolic link\n") % dest)
1150 1163 else:
1151 1164 wlock = self.wlock()
1152 1165 if dest not in self.dirstate:
1153 1166 self.dirstate.add(dest)
1154 1167 self.dirstate.copy(source, dest)
1155 1168 finally:
1156 1169 del wlock
1157 1170
1158 1171 def heads(self, start=None):
1159 1172 heads = self.changelog.heads(start)
1160 1173 # sort the output in rev descending order
1161 1174 heads = [(-self.changelog.rev(h), h) for h in heads]
1162 1175 heads.sort()
1163 1176 return [n for (r, n) in heads]
1164 1177
    def branchheads(self, branch, start=None):
        """Return the head nodes of the named branch, optionally limited
        to heads reachable from start.  Returns [] for unknown branches."""
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        # NOTE: 'set' shadows the builtin here; kept for py2.3 compatibility
        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                # known non-head: replace it with its parents
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                # first time we see this branch revision: it's a head
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1210 1223
1211 1224 def branches(self, nodes):
1212 1225 if not nodes:
1213 1226 nodes = [self.changelog.tip()]
1214 1227 b = []
1215 1228 for n in nodes:
1216 1229 t = n
1217 1230 while 1:
1218 1231 p = self.changelog.parents(n)
1219 1232 if p[1] != nullid or p[0] == nullid:
1220 1233 b.append((t, n, p[0], p[1]))
1221 1234 break
1222 1235 n = p[0]
1223 1236 return b
1224 1237
1225 1238 def between(self, pairs):
1226 1239 r = []
1227 1240
1228 1241 for top, bottom in pairs:
1229 1242 n, l, i = top, [], 0
1230 1243 f = 1
1231 1244
1232 1245 while n != bottom:
1233 1246 p = self.changelog.parents(n)[0]
1234 1247 if i == f:
1235 1248 l.append(n)
1236 1249 f = f * 2
1237 1250 n = p
1238 1251 i += 1
1239 1252
1240 1253 r.append(l)
1241 1254
1242 1255 return r
1243 1256
1244 1257 def findincoming(self, remote, base=None, heads=None, force=False):
1245 1258 """Return list of roots of the subsets of missing nodes from remote
1246 1259
1247 1260 If base dict is specified, assume that these nodes and their parents
1248 1261 exist on the remote side and that no child of a node of base exists
1249 1262 in both remote and self.
1250 1263 Furthermore base will be updated to include the nodes that exists
1251 1264 in self and remote but no children exists in self and remote.
1252 1265 If a list of heads is specified, return only nodes which are heads
1253 1266 or ancestors of these heads.
1254 1267
1255 1268 All the ancestors of base are in self and in remote.
1256 1269 All the descendants of the list returned are missing in self.
1257 1270 (and so we know that the rest of the nodes are missing in remote, see
1258 1271 outgoing)
1259 1272 """
1260 1273 m = self.changelog.nodemap
1261 1274 search = []
1262 1275 fetch = {}
1263 1276 seen = {}
1264 1277 seenbranch = {}
1265 1278 if base == None:
1266 1279 base = {}
1267 1280
1268 1281 if not heads:
1269 1282 heads = remote.heads()
1270 1283
1271 1284 if self.changelog.tip() == nullid:
1272 1285 base[nullid] = 1
1273 1286 if heads != [nullid]:
1274 1287 return [nullid]
1275 1288 return []
1276 1289
1277 1290 # assume we're closer to the tip than the root
1278 1291 # and start by examining the heads
1279 1292 self.ui.status(_("searching for changes\n"))
1280 1293
1281 1294 unknown = []
1282 1295 for h in heads:
1283 1296 if h not in m:
1284 1297 unknown.append(h)
1285 1298 else:
1286 1299 base[h] = 1
1287 1300
1288 1301 if not unknown:
1289 1302 return []
1290 1303
1291 1304 req = dict.fromkeys(unknown)
1292 1305 reqcnt = 0
1293 1306
1294 1307 # search through remote branches
1295 1308 # a 'branch' here is a linear segment of history, with four parts:
1296 1309 # head, root, first parent, second parent
1297 1310 # (a branch always has two parents (or none) by definition)
1298 1311 unknown = remote.branches(unknown)
1299 1312 while unknown:
1300 1313 r = []
1301 1314 while unknown:
1302 1315 n = unknown.pop(0)
1303 1316 if n[0] in seen:
1304 1317 continue
1305 1318
1306 1319 self.ui.debug(_("examining %s:%s\n")
1307 1320 % (short(n[0]), short(n[1])))
1308 1321 if n[0] == nullid: # found the end of the branch
1309 1322 pass
1310 1323 elif n in seenbranch:
1311 1324 self.ui.debug(_("branch already found\n"))
1312 1325 continue
1313 1326 elif n[1] and n[1] in m: # do we know the base?
1314 1327 self.ui.debug(_("found incomplete branch %s:%s\n")
1315 1328 % (short(n[0]), short(n[1])))
1316 1329 search.append(n) # schedule branch range for scanning
1317 1330 seenbranch[n] = 1
1318 1331 else:
1319 1332 if n[1] not in seen and n[1] not in fetch:
1320 1333 if n[2] in m and n[3] in m:
1321 1334 self.ui.debug(_("found new changeset %s\n") %
1322 1335 short(n[1]))
1323 1336 fetch[n[1]] = 1 # earliest unknown
1324 1337 for p in n[2:4]:
1325 1338 if p in m:
1326 1339 base[p] = 1 # latest known
1327 1340
1328 1341 for p in n[2:4]:
1329 1342 if p not in req and p not in m:
1330 1343 r.append(p)
1331 1344 req[p] = 1
1332 1345 seen[n[0]] = 1
1333 1346
1334 1347 if r:
1335 1348 reqcnt += 1
1336 1349 self.ui.debug(_("request %d: %s\n") %
1337 1350 (reqcnt, " ".join(map(short, r))))
1338 1351 for p in xrange(0, len(r), 10):
1339 1352 for b in remote.branches(r[p:p+10]):
1340 1353 self.ui.debug(_("received %s:%s\n") %
1341 1354 (short(b[0]), short(b[1])))
1342 1355 unknown.append(b)
1343 1356
1344 1357 # do binary search on the branches we found
1345 1358 while search:
1346 1359 n = search.pop(0)
1347 1360 reqcnt += 1
1348 1361 l = remote.between([(n[0], n[1])])[0]
1349 1362 l.append(n[1])
1350 1363 p = n[0]
1351 1364 f = 1
1352 1365 for i in l:
1353 1366 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1354 1367 if i in m:
1355 1368 if f <= 2:
1356 1369 self.ui.debug(_("found new branch changeset %s\n") %
1357 1370 short(p))
1358 1371 fetch[p] = 1
1359 1372 base[i] = 1
1360 1373 else:
1361 1374 self.ui.debug(_("narrowed branch search to %s:%s\n")
1362 1375 % (short(p), short(i)))
1363 1376 search.append((p, i))
1364 1377 break
1365 1378 p, f = i, f * 2
1366 1379
1367 1380 # sanity check our fetch list
1368 1381 for f in fetch.keys():
1369 1382 if f in m:
1370 1383 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1371 1384
1372 1385 if base.keys() == [nullid]:
1373 1386 if force:
1374 1387 self.ui.warn(_("warning: repository is unrelated\n"))
1375 1388 else:
1376 1389 raise util.Abort(_("repository is unrelated"))
1377 1390
1378 1391 self.ui.debug(_("found new changesets starting at ") +
1379 1392 " ".join([short(f) for f in fetch]) + "\n")
1380 1393
1381 1394 self.ui.debug(_("%d total queries\n") % reqcnt)
1382 1395
1383 1396 return fetch.keys()
1384 1397
1385 1398 def findoutgoing(self, remote, base=None, heads=None, force=False):
1386 1399 """Return list of nodes that are roots of subsets not in remote
1387 1400
1388 1401 If base dict is specified, assume that these nodes and their parents
1389 1402 exist on the remote side.
1390 1403 If a list of heads is specified, return only nodes which are heads
1391 1404 or ancestors of these heads, and return a second element which
1392 1405 contains all remote heads which get new children.
1393 1406 """
1394 1407 if base == None:
1395 1408 base = {}
1396 1409 self.findincoming(remote, base, heads, force=force)
1397 1410
1398 1411 self.ui.debug(_("common changesets up to ")
1399 1412 + " ".join(map(short, base.keys())) + "\n")
1400 1413
1401 1414 remain = dict.fromkeys(self.changelog.nodemap)
1402 1415
1403 1416 # prune everything remote has from the tree
1404 1417 del remain[nullid]
1405 1418 remove = base.keys()
1406 1419 while remove:
1407 1420 n = remove.pop(0)
1408 1421 if n in remain:
1409 1422 del remain[n]
1410 1423 for p in self.changelog.parents(n):
1411 1424 remove.append(p)
1412 1425
1413 1426 # find every node whose parents have been pruned
1414 1427 subset = []
1415 1428 # find every remote head that will get new children
1416 1429 updated_heads = {}
1417 1430 for n in remain:
1418 1431 p1, p2 = self.changelog.parents(n)
1419 1432 if p1 not in remain and p2 not in remain:
1420 1433 subset.append(n)
1421 1434 if heads:
1422 1435 if p1 in heads:
1423 1436 updated_heads[p1] = True
1424 1437 if p2 in heads:
1425 1438 updated_heads[p2] = True
1426 1439
1427 1440 # this is the set of all roots we have to push
1428 1441 if heads:
1429 1442 return subset, updated_heads.keys()
1430 1443 else:
1431 1444 return subset
1432 1445
1433 1446 def pull(self, remote, heads=None, force=False):
1434 1447 lock = self.lock()
1435 1448 try:
1436 1449 fetch = self.findincoming(remote, heads=heads, force=force)
1437 1450 if fetch == [nullid]:
1438 1451 self.ui.status(_("requesting all changes\n"))
1439 1452
1440 1453 if not fetch:
1441 1454 self.ui.status(_("no changes found\n"))
1442 1455 return 0
1443 1456
1444 1457 if heads is None:
1445 1458 cg = remote.changegroup(fetch, 'pull')
1446 1459 else:
1447 1460 if 'changegroupsubset' not in remote.capabilities:
1448 1461 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1449 1462 cg = remote.changegroupsubset(fetch, heads, 'pull')
1450 1463 return self.addchangegroup(cg, 'pull', remote.url())
1451 1464 finally:
1452 1465 del lock
1453 1466
1454 1467 def push(self, remote, force=False, revs=None):
1455 1468 # there are two ways to push to remote repo:
1456 1469 #
1457 1470 # addchangegroup assumes local user can lock remote
1458 1471 # repo (local filesystem, old ssh servers).
1459 1472 #
1460 1473 # unbundle assumes local user cannot lock remote repo (new ssh
1461 1474 # servers, http servers).
1462 1475
1463 1476 if remote.capable('unbundle'):
1464 1477 return self.push_unbundle(remote, force, revs)
1465 1478 return self.push_addchangegroup(remote, force, revs)
1466 1479
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads), or (None, 1) when there is
        nothing to push or when the push would create new remote heads
        and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' with the nodes both sides have
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repository is empty: cannot create extra heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # remains a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: assume it remains
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # only reached with force set: warn about unseen remote changes
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1522 1535
1523 1536 def push_addchangegroup(self, remote, force, revs):
1524 1537 lock = remote.lock()
1525 1538 try:
1526 1539 ret = self.prepush(remote, force, revs)
1527 1540 if ret[0] is not None:
1528 1541 cg, remote_heads = ret
1529 1542 return remote.addchangegroup(cg, 'push', self.url())
1530 1543 return ret[1]
1531 1544 finally:
1532 1545 del lock
1533 1546
1534 1547 def push_unbundle(self, remote, force, revs):
1535 1548 # local repo finds heads on server, finds out what revs it
1536 1549 # must push. once revs transferred, if server finds it has
1537 1550 # different heads (someone else won commit/push race), server
1538 1551 # aborts.
1539 1552
1540 1553 ret = self.prepush(remote, force, revs)
1541 1554 if ret[0] is not None:
1542 1555 cg, remote_heads = ret
1543 1556 if force: remote_heads = ['force']
1544 1557 return remote.unbundle(cg, remote_heads, 'push')
1545 1558 return ret[1]
1546 1559
1547 1560 def changegroupinfo(self, nodes, source):
1548 1561 if self.ui.verbose or source == 'bundle':
1549 1562 self.ui.status(_("%d changesets found\n") % len(nodes))
1550 1563 if self.ui.debugflag:
1551 1564 self.ui.debug(_("List of changesets:\n"))
1552 1565 for node in nodes:
1553 1566 self.ui.debug("%s\n" % hex(node))
1554 1567
1555 1568 def changegroupsubset(self, bases, heads, source, extranodes=None):
1556 1569 """This function generates a changegroup consisting of all the nodes
1557 1570 that are descendents of any of the bases, and ancestors of any of
1558 1571 the heads.
1559 1572
1560 1573 It is fairly complex as determining which filenodes and which
1561 1574 manifest nodes need to be included for the changeset to be complete
1562 1575 is non-trivial.
1563 1576
1564 1577 Another wrinkle is doing the reverse, figuring out which changeset in
1565 1578 the changegroup a particular filenode or manifestnode belongs to.
1566 1579
1567 1580 The caller can specify some nodes that must be included in the
1568 1581 changegroup using the extranodes argument. It should be a dict
1569 1582 where the keys are the filenames (or 1 for the manifest), and the
1570 1583 values are lists of (node, linknode) tuples, where node is a wanted
1571 1584 node and linknode is the changelog node that should be transmitted as
1572 1585 the linkrev.
1573 1586 """
1574 1587
1575 1588 self.hook('preoutgoing', throw=True, source=source)
1576 1589
1577 1590 # Set up some initial variables
1578 1591 # Make it easy to refer to self.changelog
1579 1592 cl = self.changelog
1580 1593 # msng is short for missing - compute the list of changesets in this
1581 1594 # changegroup.
1582 1595 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1583 1596 self.changegroupinfo(msng_cl_lst, source)
1584 1597 # Some bases may turn out to be superfluous, and some heads may be
1585 1598 # too. nodesbetween will return the minimal set of bases and heads
1586 1599 # necessary to re-create the changegroup.
1587 1600
1588 1601 # Known heads are the list of heads that it is assumed the recipient
1589 1602 # of this changegroup will know about.
1590 1603 knownheads = {}
1591 1604 # We assume that all parents of bases are known heads.
1592 1605 for n in bases:
1593 1606 for p in cl.parents(n):
1594 1607 if p != nullid:
1595 1608 knownheads[p] = 1
1596 1609 knownheads = knownheads.keys()
1597 1610 if knownheads:
1598 1611 # Now that we know what heads are known, we can compute which
1599 1612 # changesets are known. The recipient must know about all
1600 1613 # changesets required to reach the known heads from the null
1601 1614 # changeset.
1602 1615 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1603 1616 junk = None
1604 1617 # Transform the list into an ersatz set.
1605 1618 has_cl_set = dict.fromkeys(has_cl_set)
1606 1619 else:
1607 1620 # If there were no known heads, the recipient cannot be assumed to
1608 1621 # know about any changesets.
1609 1622 has_cl_set = {}
1610 1623
1611 1624 # Make it easy to refer to self.manifest
1612 1625 mnfst = self.manifest
1613 1626 # We don't know which manifests are missing yet
1614 1627 msng_mnfst_set = {}
1615 1628 # Nor do we know which filenodes are missing.
1616 1629 msng_filenode_set = {}
1617 1630
1618 1631 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1619 1632 junk = None
1620 1633
1621 1634 # A changeset always belongs to itself, so the changenode lookup
1622 1635 # function for a changenode is identity.
1623 1636 def identity(x):
1624 1637 return x
1625 1638
1626 1639 # A function generating function. Sets up an environment for the
1627 1640 # inner function.
1628 1641 def cmp_by_rev_func(revlog):
1629 1642 # Compare two nodes by their revision number in the environment's
1630 1643 # revision history. Since the revision number both represents the
1631 1644 # most efficient order to read the nodes in, and represents a
1632 1645 # topological sorting of the nodes, this function is often useful.
1633 1646 def cmp_by_rev(a, b):
1634 1647 return cmp(revlog.rev(a), revlog.rev(b))
1635 1648 return cmp_by_rev
1636 1649
1637 1650 # If we determine that a particular file or manifest node must be a
1638 1651 # node that the recipient of the changegroup will already have, we can
1639 1652 # also assume the recipient will have all the parents. This function
1640 1653 # prunes them from the set of missing nodes.
1641 1654 def prune_parents(revlog, hasset, msngset):
1642 1655 haslst = hasset.keys()
1643 1656 haslst.sort(cmp_by_rev_func(revlog))
1644 1657 for node in haslst:
1645 1658 parentlst = [p for p in revlog.parents(node) if p != nullid]
1646 1659 while parentlst:
1647 1660 n = parentlst.pop()
1648 1661 if n not in hasset:
1649 1662 hasset[n] = 1
1650 1663 p = [p for p in revlog.parents(n) if p != nullid]
1651 1664 parentlst.extend(p)
1652 1665 for n in hasset:
1653 1666 msngset.pop(n, None)
1654 1667
1655 1668 # This is a function generating function used to set up an environment
1656 1669 # for the inner function to execute in.
1657 1670 def manifest_and_file_collector(changedfileset):
1658 1671 # This is an information gathering function that gathers
1659 1672 # information from each changeset node that goes out as part of
1660 1673 # the changegroup. The information gathered is a list of which
1661 1674 # manifest nodes are potentially required (the recipient may
1662 1675 # already have them) and total list of all files which were
1663 1676 # changed in any changeset in the changegroup.
1664 1677 #
1665 1678 # We also remember the first changenode we saw any manifest
1666 1679 # referenced by so we can later determine which changenode 'owns'
1667 1680 # the manifest.
1668 1681 def collect_manifests_and_files(clnode):
1669 1682 c = cl.read(clnode)
1670 1683 for f in c[3]:
1671 1684 # This is to make sure we only have one instance of each
1672 1685 # filename string for each filename.
1673 1686 changedfileset.setdefault(f, f)
1674 1687 msng_mnfst_set.setdefault(c[0], clnode)
1675 1688 return collect_manifests_and_files
1676 1689
1677 1690 # Figure out which manifest nodes (of the ones we think might be part
1678 1691 # of the changegroup) the recipient must know about and remove them
1679 1692 # from the changegroup.
1680 1693 def prune_manifests():
1681 1694 has_mnfst_set = {}
1682 1695 for n in msng_mnfst_set:
1683 1696 # If a 'missing' manifest thinks it belongs to a changenode
1684 1697 # the recipient is assumed to have, obviously the recipient
1685 1698 # must have that manifest.
1686 1699 linknode = cl.node(mnfst.linkrev(n))
1687 1700 if linknode in has_cl_set:
1688 1701 has_mnfst_set[n] = 1
1689 1702 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1690 1703
1691 1704 # Use the information collected in collect_manifests_and_files to say
1692 1705 # which changenode any manifestnode belongs to.
1693 1706 def lookup_manifest_link(mnfstnode):
1694 1707 return msng_mnfst_set[mnfstnode]
1695 1708
1696 1709 # A function generating function that sets up the initial environment
1697 1710 # the inner function.
1698 1711 def filenode_collector(changedfiles):
1699 1712 next_rev = [0]
1700 1713 # This gathers information from each manifestnode included in the
1701 1714 # changegroup about which filenodes the manifest node references
1702 1715 # so we can include those in the changegroup too.
1703 1716 #
1704 1717 # It also remembers which changenode each filenode belongs to. It
1705 1718 # does this by assuming the a filenode belongs to the changenode
1706 1719 # the first manifest that references it belongs to.
1707 1720 def collect_msng_filenodes(mnfstnode):
1708 1721 r = mnfst.rev(mnfstnode)
1709 1722 if r == next_rev[0]:
1710 1723 # If the last rev we looked at was the one just previous,
1711 1724 # we only need to see a diff.
1712 1725 deltamf = mnfst.readdelta(mnfstnode)
1713 1726 # For each line in the delta
1714 1727 for f, fnode in deltamf.items():
1715 1728 f = changedfiles.get(f, None)
1716 1729 # And if the file is in the list of files we care
1717 1730 # about.
1718 1731 if f is not None:
1719 1732 # Get the changenode this manifest belongs to
1720 1733 clnode = msng_mnfst_set[mnfstnode]
1721 1734 # Create the set of filenodes for the file if
1722 1735 # there isn't one already.
1723 1736 ndset = msng_filenode_set.setdefault(f, {})
1724 1737 # And set the filenode's changelog node to the
1725 1738 # manifest's if it hasn't been set already.
1726 1739 ndset.setdefault(fnode, clnode)
1727 1740 else:
1728 1741 # Otherwise we need a full manifest.
1729 1742 m = mnfst.read(mnfstnode)
1730 1743 # For every file in we care about.
1731 1744 for f in changedfiles:
1732 1745 fnode = m.get(f, None)
1733 1746 # If it's in the manifest
1734 1747 if fnode is not None:
1735 1748 # See comments above.
1736 1749 clnode = msng_mnfst_set[mnfstnode]
1737 1750 ndset = msng_filenode_set.setdefault(f, {})
1738 1751 ndset.setdefault(fnode, clnode)
1739 1752 # Remember the revision we hope to see next.
1740 1753 next_rev[0] = r + 1
1741 1754 return collect_msng_filenodes
1742 1755
1743 1756 # We have a list of filenodes we think we need for a file, lets remove
1744 1757 # all those we now the recipient must have.
1745 1758 def prune_filenodes(f, filerevlog):
1746 1759 msngset = msng_filenode_set[f]
1747 1760 hasset = {}
1748 1761 # If a 'missing' filenode thinks it belongs to a changenode we
1749 1762 # assume the recipient must have, then the recipient must have
1750 1763 # that filenode.
1751 1764 for n in msngset:
1752 1765 clnode = cl.node(filerevlog.linkrev(n))
1753 1766 if clnode in has_cl_set:
1754 1767 hasset[n] = 1
1755 1768 prune_parents(filerevlog, hasset, msngset)
1756 1769
1757 1770 # A function generator function that sets up the a context for the
1758 1771 # inner function.
1759 1772 def lookup_filenode_link_func(fname):
1760 1773 msngset = msng_filenode_set[fname]
1761 1774 # Lookup the changenode the filenode belongs to.
1762 1775 def lookup_filenode_link(fnode):
1763 1776 return msngset[fnode]
1764 1777 return lookup_filenode_link
1765 1778
1766 1779 # Add the nodes that were explicitly requested.
1767 1780 def add_extra_nodes(name, nodes):
1768 1781 if not extranodes or name not in extranodes:
1769 1782 return
1770 1783
1771 1784 for node, linknode in extranodes[name]:
1772 1785 if node not in nodes:
1773 1786 nodes[node] = linknode
1774 1787
1775 1788 # Now that we have all theses utility functions to help out and
1776 1789 # logically divide up the task, generate the group.
1777 1790 def gengroup():
1778 1791 # The set of changed files starts empty.
1779 1792 changedfiles = {}
1780 1793 # Create a changenode group generator that will call our functions
1781 1794 # back to lookup the owning changenode and collect information.
1782 1795 group = cl.group(msng_cl_lst, identity,
1783 1796 manifest_and_file_collector(changedfiles))
1784 1797 for chnk in group:
1785 1798 yield chnk
1786 1799
1787 1800 # The list of manifests has been collected by the generator
1788 1801 # calling our functions back.
1789 1802 prune_manifests()
1790 1803 add_extra_nodes(1, msng_mnfst_set)
1791 1804 msng_mnfst_lst = msng_mnfst_set.keys()
1792 1805 # Sort the manifestnodes by revision number.
1793 1806 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1794 1807 # Create a generator for the manifestnodes that calls our lookup
1795 1808 # and data collection functions back.
1796 1809 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1797 1810 filenode_collector(changedfiles))
1798 1811 for chnk in group:
1799 1812 yield chnk
1800 1813
1801 1814 # These are no longer needed, dereference and toss the memory for
1802 1815 # them.
1803 1816 msng_mnfst_lst = None
1804 1817 msng_mnfst_set.clear()
1805 1818
1806 1819 if extranodes:
1807 1820 for fname in extranodes:
1808 1821 if isinstance(fname, int):
1809 1822 continue
1810 1823 add_extra_nodes(fname,
1811 1824 msng_filenode_set.setdefault(fname, {}))
1812 1825 changedfiles[fname] = 1
1813 1826 changedfiles = changedfiles.keys()
1814 1827 changedfiles.sort()
1815 1828 # Go through all our files in order sorted by name.
1816 1829 for fname in changedfiles:
1817 1830 filerevlog = self.file(fname)
1818 1831 if filerevlog.count() == 0:
1819 1832 raise util.Abort(_("empty or missing revlog for %s") % fname)
1820 1833 # Toss out the filenodes that the recipient isn't really
1821 1834 # missing.
1822 1835 if fname in msng_filenode_set:
1823 1836 prune_filenodes(fname, filerevlog)
1824 1837 msng_filenode_lst = msng_filenode_set[fname].keys()
1825 1838 else:
1826 1839 msng_filenode_lst = []
1827 1840 # If any filenodes are left, generate the group for them,
1828 1841 # otherwise don't bother.
1829 1842 if len(msng_filenode_lst) > 0:
1830 1843 yield changegroup.chunkheader(len(fname))
1831 1844 yield fname
1832 1845 # Sort the filenodes by their revision #
1833 1846 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1834 1847 # Create a group generator and only pass in a changenode
1835 1848 # lookup function as we need to collect no information
1836 1849 # from filenodes.
1837 1850 group = filerevlog.group(msng_filenode_lst,
1838 1851 lookup_filenode_link_func(fname))
1839 1852 for chnk in group:
1840 1853 yield chnk
1841 1854 if fname in msng_filenode_set:
1842 1855 # Don't need this anymore, toss it to free memory.
1843 1856 del msng_filenode_set[fname]
1844 1857 # Signal that no more groups are left.
1845 1858 yield changegroup.closechunk()
1846 1859
1847 1860 if msng_cl_lst:
1848 1861 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1849 1862
1850 1863 return util.chunkbuffer(gengroup())
1851 1864
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Emits (via the returned chunkbuffer) the changelog group, then the
        manifest group, then one group per changed file (preceded by a
        chunk header and the file name), and finally a close chunk.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset descending from the requested bases is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing changelog revision numbers, used below for fast
        # membership tests against linkrevs (dict used as a set)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own linkrev lookup key
            return x

        def gennodelst(revlog):
            # yield the nodes of `revlog` whose linked changeset is outgoing
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback passed to cl.group(): records every file
            # touched by each outgoing changeset into `changedfileset`
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:  # c[3] is the changed-files list
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # build a lookup mapping a node of `revlog` back to the
            # changelog node it was introduced by
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first, collecting changed files as we go
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, in sorted name order
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can skip files with nothing to send
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups are left
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1921 1934
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source: stream of changegroup chunks
        srctype: source description passed to the hooks (e.g. 'push', 'pull')
        url: origin URL passed to the hooks
        emptyok: when true, an empty changelog group is not an error

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # lookup callback for cl.addgroup: logs the incoming node and
            # returns cl.count(), the revision number it is about to occupy
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number (linkrev lookup
            # for the manifest/file groups below)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weak proxy so the transaction's destructor is not kept alive
            # by the revlogs holding a reference to it
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: tip revision before/after the changelog group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is preceded by a chunk holding its name;
                # an empty chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # last chance for hooks to veto before the transaction commits
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # drop the last reference so the transaction object is finalized
            # (and rolled back if tr.close() was never reached)
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2025 2037
2026 2038
2027 2039 def stream_in(self, remote):
2028 2040 fp = remote.stream_out()
2029 2041 l = fp.readline()
2030 2042 try:
2031 2043 resp = int(l)
2032 2044 except ValueError:
2033 2045 raise util.UnexpectedOutput(
2034 2046 _('Unexpected response from remote server:'), l)
2035 2047 if resp == 1:
2036 2048 raise util.Abort(_('operation forbidden by server'))
2037 2049 elif resp == 2:
2038 2050 raise util.Abort(_('locking the remote repository failed'))
2039 2051 elif resp != 0:
2040 2052 raise util.Abort(_('the server sent an unknown error code'))
2041 2053 self.ui.status(_('streaming all changes\n'))
2042 2054 l = fp.readline()
2043 2055 try:
2044 2056 total_files, total_bytes = map(int, l.split(' ', 1))
2045 2057 except ValueError, TypeError:
2046 2058 raise util.UnexpectedOutput(
2047 2059 _('Unexpected response from remote server:'), l)
2048 2060 self.ui.status(_('%d files to transfer, %s of data\n') %
2049 2061 (total_files, util.bytecount(total_bytes)))
2050 2062 start = time.time()
2051 2063 for i in xrange(total_files):
2052 2064 # XXX doesn't support '\n' or '\r' in filenames
2053 2065 l = fp.readline()
2054 2066 try:
2055 2067 name, size = l.split('\0', 1)
2056 2068 size = int(size)
2057 2069 except ValueError, TypeError:
2058 2070 raise util.UnexpectedOutput(
2059 2071 _('Unexpected response from remote server:'), l)
2060 2072 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2061 2073 ofp = self.sopener(name, 'w')
2062 2074 for chunk in util.filechunkiter(fp, limit=size):
2063 2075 ofp.write(chunk)
2064 2076 ofp.close()
2065 2077 elapsed = time.time() - start
2066 2078 if elapsed <= 0:
2067 2079 elapsed = 0.001
2068 2080 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2069 2081 (util.bytecount(total_bytes), elapsed,
2070 2082 util.bytecount(total_bytes / elapsed)))
2071 2083 self.invalidate()
2072 2084 return len(self.heads()) + 1
2073 2085
2074 2086 def clone(self, remote, heads=[], stream=False):
2075 2087 '''clone remote repository.
2076 2088
2077 2089 keyword arguments:
2078 2090 heads: list of revs to clone (forces use of pull)
2079 2091 stream: use streaming clone if possible'''
2080 2092
2081 2093 # now, all clients that can request uncompressed clones can
2082 2094 # read repo formats supported by all servers that can serve
2083 2095 # them.
2084 2096
2085 2097 # if revlog format changes, client will have to check version
2086 2098 # and format flags on "stream" capability, and use
2087 2099 # uncompressed only if compatible.
2088 2100
2089 2101 if stream and not heads and remote.capable('stream'):
2090 2102 return self.stream_in(remote)
2091 2103 return self.pull(remote, heads)
2092 2104
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the queued (src, dest) renames.

    The pairs are copied into plain tuples up front so the returned
    closure keeps no reference back to the caller's objects.
    """
    pending = []
    for pair in files:
        pending.append(tuple(pair))
    def run_renames():
        for source, target in pending:
            util.rename(source, target)
    return run_renames
2100 2112
def instance(ui, path, create):
    """Open (or create, when `create` is true) the local repository at
    `path`, stripping any leading 'file' scheme from the path first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2103 2115
def islocal(path):
    """Report whether this repository type is local — it always is.

    `path` is accepted only for interface compatibility with other
    repository modules and is not inspected.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now