##// END OF EJS Templates
add format.usefncache config option (default is true)...
Adrian Buehlmann -
r7234:ae70fe61 default
parent child Browse files
Show More
@@ -1,2126 +1,2127
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
class localrepository(repo.repository):
    """A repository backed by a local .hg directory (revlogs on disk)."""
    # wire-protocol capabilities this repository implementation offers
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # entries of .hg/requires that this version of the code understands
    supported = ('revlogv1', 'store', 'fncache')
20 20
    def __init__(self, parentui, path=None, create=0):
        """Open, or with create=1 create, the repository at path.

        parentui: the ui object the repository-local ui is derived from.
        Raises repo.RepoError when the repository is missing, already
        exists (on create), or declares an unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    # fncache only makes sense together with a store
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                # a missing requires file means an old-style repository
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc: fall back to the parent ui's configuration
            pass

        # lazily-populated caches, see tags()/branchtags()/nodetags()
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
86 87
    def __getattr__(self, name):
        """Lazily build the expensive changelog/manifest/dirstate objects.

        Runs only on first access: each branch assigns the real object
        onto self, so subsequent lookups bypass __getattr__ entirely.
        """
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # revlog format version to use is dictated by the changelog
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # force the changelog to load first so defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)
101 102
102 103 def __getitem__(self, changeid):
103 104 if changeid == None:
104 105 return context.workingctx(self)
105 106 return context.changectx(self, changeid)
106 107
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True
109 110
    def __len__(self):
        """Return the number of revisions in the repository."""
        return len(self.changelog)
112 113
    def __iter__(self):
        """Iterate over revision numbers, oldest first."""
        for i in xrange(len(self)):
            yield i
116 117
    def url(self):
        """Return this repository's URL (always a local file: URL)."""
        return 'file:' + self.root
119 120
    def hook(self, name, throw=False, **args):
        """Run the named hook; with throw=True a failing hook raises."""
        return hook.hook(self.ui, self, name, throw, **args)
122 123
123 124 tag_disallowed = ':\r\n'
124 125
125 126 def _tag(self, names, node, message, local, user, date, parent=None,
126 127 extra={}):
127 128 use_dirstate = parent is None
128 129
129 130 if isinstance(names, str):
130 131 allchars = names
131 132 names = (names,)
132 133 else:
133 134 allchars = ''.join(names)
134 135 for c in self.tag_disallowed:
135 136 if c in allchars:
136 137 raise util.Abort(_('%r cannot be used in a tag name') % c)
137 138
138 139 for name in names:
139 140 self.hook('pretag', throw=True, node=hex(node), tag=name,
140 141 local=local)
141 142
142 143 def writetags(fp, names, munge, prevtags):
143 144 fp.seek(0, 2)
144 145 if prevtags and prevtags[-1] != '\n':
145 146 fp.write('\n')
146 147 for name in names:
147 148 m = munge and munge(name) or name
148 149 if self._tagstypecache and name in self._tagstypecache:
149 150 old = self.tagscache.get(name, nullid)
150 151 fp.write('%s %s\n' % (hex(old), m))
151 152 fp.write('%s %s\n' % (hex(node), m))
152 153 fp.close()
153 154
154 155 prevtags = ''
155 156 if local:
156 157 try:
157 158 fp = self.opener('localtags', 'r+')
158 159 except IOError, err:
159 160 fp = self.opener('localtags', 'a')
160 161 else:
161 162 prevtags = fp.read()
162 163
163 164 # local tags are stored in the current charset
164 165 writetags(fp, names, None, prevtags)
165 166 for name in names:
166 167 self.hook('tag', node=hex(node), tag=name, local=local)
167 168 return
168 169
169 170 if use_dirstate:
170 171 try:
171 172 fp = self.wfile('.hgtags', 'rb+')
172 173 except IOError, err:
173 174 fp = self.wfile('.hgtags', 'ab')
174 175 else:
175 176 prevtags = fp.read()
176 177 else:
177 178 try:
178 179 prevtags = self.filectx('.hgtags', parent).data()
179 180 except revlog.LookupError:
180 181 pass
181 182 fp = self.wfile('.hgtags', 'wb')
182 183 if prevtags:
183 184 fp.write(prevtags)
184 185
185 186 # committed tags are stored in UTF-8
186 187 writetags(fp, names, util.fromlocal, prevtags)
187 188
188 189 if use_dirstate and '.hgtags' not in self.dirstate:
189 190 self.add(['.hgtags'])
190 191
191 192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
192 193 extra=extra)
193 194
194 195 for name in names:
195 196 self.hook('tag', node=hex(node), tag=name, local=local)
196 197
197 198 return tagnode
198 199
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse when .hgtags appears anywhere in the first five status
        # lists (modified/added/removed/deleted/unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)
226 227
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tags file and merge its entries into globaltags;
            # fn is only used in warning messages
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates the older nodes this tag pointed at
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            # no localtags file
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # a tag pointing at nullid means the tag was deleted
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache
310 311
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        # populate _tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
323 324
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for every head carrying .hgtags.

        Heads are visited oldest-first so the tip comes last; if several
        heads share the same .hgtags filenode, only the last occurrence
        is kept.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate .hgtags revision: drop the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
341 342
342 343 def tagslist(self):
343 344 '''return a list of tags ordered by revision'''
344 345 l = []
345 346 for t, n in self.tags().items():
346 347 try:
347 348 r = self.changelog.rev(n)
348 349 except:
349 350 r = -2 # sort to the beginning of the list if unknown
350 351 l.append((r, t, n))
351 352 return [(t, n) for r, t, n in util.sort(l)]
352 353
353 354 def nodetags(self, node):
354 355 '''return the tags associated with a node'''
355 356 if not self.nodetagscache:
356 357 self.nodetagscache = {}
357 358 for t, n in self.tags().items():
358 359 self.nodetagscache.setdefault(n, []).append(t)
359 360 return self.nodetagscache.get(node, [])
360 361
    def _branchtags(self, partial, lrev):
        """Extend partial branch map from lrev+1 through tip and persist it."""
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
368 369
    def branchtags(self):
        """Return a mapping of branch name (local charset) to tip node."""
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. after a strip): reread disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
394 395
    def _readbranchcache(self):
        """Read branch.cache from disk.

        Returns (partial, last, lrev): the branch->node map plus the tip
        node/rev it is valid for.  Any problem yields an empty cache.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file: start from scratch
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is simply discarded
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
421 422
    def _writebranchcache(self, branches, tip, tiprev):
        """Persist the branch map to branch.cache; failures are non-fatal."""
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            # the cache is only an optimization; ignore write errors
            pass
431 432
432 433 def _updatebranchcache(self, partial, start, end):
433 434 for r in xrange(start, end):
434 435 c = self[r]
435 436 b = c.branch()
436 437 partial[b] = c.node()
437 438
438 439 def lookup(self, key):
439 440 if key == '.':
440 441 return self.dirstate.parents()[0]
441 442 elif key == 'null':
442 443 return nullid
443 444 n = self.changelog._match(key)
444 445 if n:
445 446 return n
446 447 if key in self.tags():
447 448 return self.tags()[key]
448 449 if key in self.branchtags():
449 450 return self.branchtags()[key]
450 451 n = self.changelog._partialmatch(key)
451 452 if n:
452 453 return n
453 454 try:
454 455 if len(key) == 20:
455 456 key = hex(key)
456 457 except:
457 458 pass
458 459 raise repo.RepoError(_("unknown revision '%s'") % key)
459 460
    def local(self):
        """True: this repository class is always local (vs. http/ssh)."""
        return True
462 463
    def join(self, f):
        """Join f with the .hg directory path."""
        return os.path.join(self.path, f)
465 466
    def wjoin(self, f):
        """Join f with the working directory root."""
        return os.path.join(self.root, f)
468 469
    def rjoin(self, f):
        """Join f with the root after separator conversion (util.pconvert)."""
        return os.path.join(self.root, util.pconvert(f))
471 472
472 473 def file(self, f):
473 474 if f[0] == '/':
474 475 f = f[1:]
475 476 return filelog.filelog(self.sopener, f)
476 477
    def changectx(self, changeid):
        """Return the change context for changeid (same as self[changeid])."""
        return self[changeid]
479 480
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None means the working directory (see __getitem__)
        return self[changeid].parents()
483 484
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
488 489
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
491 492
    def pathto(self, f, cwd=None):
        """Render repo file f relative to cwd (delegates to dirstate)."""
        return self.dirstate.pathto(f, cwd)
494 495
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
497 498
    def _link(self, f):
        """True if working-directory file f is a symlink."""
        return os.path.islink(self.wjoin(f))
500 501
501 502 def _filter(self, filter, filename, data):
502 503 if filter not in self.filterpats:
503 504 l = []
504 505 for pat, cmd in self.ui.configitems(filter):
505 506 if cmd == '!':
506 507 continue
507 508 mf = util.matcher(self.root, "", [pat], [], [])[1]
508 509 fn = None
509 510 params = cmd
510 511 for name, filterfn in self._datafilters.iteritems():
511 512 if cmd.startswith(name):
512 513 fn = filterfn
513 514 params = cmd[len(name):].lstrip()
514 515 break
515 516 if not fn:
516 517 fn = lambda s, c, **kwargs: util.filter(s, c)
517 518 # Wrap old filters not supporting keyword arguments
518 519 if not inspect.getargspec(fn)[2]:
519 520 oldfn = fn
520 521 fn = lambda s, c, **kwargs: oldfn(s, c)
521 522 l.append((mf, fn, params))
522 523 self.filterpats[filter] = l
523 524
524 525 for mf, fn, cmd in self.filterpats[filter]:
525 526 if mf(filename):
526 527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 528 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
528 529 break
529 530
530 531 return data
531 532
    def adddatafilter(self, name, filter):
        """Register a named data filter usable from encode/decode config."""
        self._datafilters[name] = filter
534 535
535 536 def wread(self, filename):
536 537 if self._link(filename):
537 538 data = os.readlink(self.wjoin(filename))
538 539 else:
539 540 data = self.wopener(filename, 'r').read()
540 541 return self._filter("encode", filename, data)
541 542
    def wwrite(self, filename, data, flags):
        """Write data to the working directory, applying decode filters.

        flags: 'l' creates a symlink whose target is data; 'x' sets the
        executable bit on the written file.
        """
        data = self._filter("decode", filename, data)
        try:
            # remove any existing file/symlink first so we never write
            # through a stale symlink
            os.unlink(self.wjoin(filename))
        except OSError:
            # file did not exist (or could not be removed; the write below
            # will then surface the real error)
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
554 555
    def wwritedata(self, filename, data):
        """Return data as it would be written to the working dir (decoded)."""
        return self._filter("decode", filename, data)
557 558
    def transaction(self):
        """Start (or nest into) a store transaction and return it.

        Saves dirstate and branch so rollback() can restore them; raises
        repo.RepoError if a journal from a crashed transaction exists.
        """
        if self._transref and self._transref():
            # a transaction is already running: nest inside it
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on successful close, journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
583 584
    def recover(self):
        """Roll back an interrupted transaction; True if one was found."""
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                # cached revlog data is stale after the rollback
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
597 598
    def rollback(self):
        """Undo the last transaction, restoring dirstate and branch."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing: keep the current branch
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
620 621
621 622 def invalidate(self):
622 623 for a in "changelog manifest".split():
623 624 if a in self.__dict__:
624 625 delattr(self, a)
625 626 self.tagscache = None
626 627 self._tagstypecache = None
627 628 self.nodetagscache = None
628 629 self.branchcache = None
629 630 self._ubranchcache = None
630 631 self._branchcachetip = None
631 632
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lockname; with wait=True retry up to ui.timeout seconds."""
        try:
            # timeout 0: fail immediately if the lock is held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
646 647
    def lock(self, wait=True):
        """Acquire the store lock, reusing a still-live one when possible."""
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        # weakref only: the lock is released when its last strong ref dies
        self._lockref = weakref.ref(l)
        return l
655 656
    def wlock(self, wait=True):
        """Acquire the working-dir lock (dirstate is written on release)."""
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
665 666
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filenode.  Appends fn to changelist when a new
        file revision is actually created.
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
736 737
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        """Commit with explicitly supplied parents; an empty commit is
        allowed (empty_ok=True)."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)
742 743
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit changes to the repository and return the new node.

        With p1=None the dirstate parents are used (the normal case);
        explicit parents indicate a rawcommit-style call.
        NOTE(review): 'extra={}' is a shared mutable default — only safe
        as long as no caller or callee mutates it.
        """
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # classify the explicitly named files via the dirstate
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            # refuse to commit over unresolved merge conflicts
            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock
792 793
    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            # force/empty_ok: the context is taken as-is; dirstate flags
            # off because the working directory is not involved
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock
808 809
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Shared backend for commit()/commitctx(): write file revisions,
        the manifest and the changeset inside one transaction.

        Returns the new changeset node, or None when nothing changed.
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # proxy: do not let the closures below keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit-style: an unreadable file becomes a removal
                        remove.append(f)

            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # strip trailing whitespace and leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                # refresh the branch cache now that tip moved
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
947 948
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        # node=None walks the working directory (see __getitem__)
        return self[node].walk(match)
955 956
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The deleted/unknown/ignored lists are only meaningful when node2
        is the working directory; they are emptied otherwise.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # accept either changectx objects or anything self[...] resolves
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only complain about files absent from the base revision
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            # (cmp holds files whose stat data was inconclusive)
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort only: skip the fixup if the
                            # wlock cannot be taken without blocking
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, classifying against mf1; entries left in mf1
            # afterwards are files removed between the two states
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1061 1062
1062 1063 def add(self, list):
1063 1064 wlock = self.wlock()
1064 1065 try:
1065 1066 rejected = []
1066 1067 for f in list:
1067 1068 p = self.wjoin(f)
1068 1069 try:
1069 1070 st = os.lstat(p)
1070 1071 except:
1071 1072 self.ui.warn(_("%s does not exist!\n") % f)
1072 1073 rejected.append(f)
1073 1074 continue
1074 1075 if st.st_size > 10000000:
1075 1076 self.ui.warn(_("%s: files over 10MB may cause memory and"
1076 1077 " performance problems\n"
1077 1078 "(use 'hg revert %s' to unadd the file)\n")
1078 1079 % (f, f))
1079 1080 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1080 1081 self.ui.warn(_("%s not added: only files and symlinks "
1081 1082 "supported currently\n") % f)
1082 1083 rejected.append(p)
1083 1084 elif self.dirstate[f] in 'amn':
1084 1085 self.ui.warn(_("%s already tracked!\n") % f)
1085 1086 elif self.dirstate[f] == 'r':
1086 1087 self.dirstate.normallookup(f)
1087 1088 else:
1088 1089 self.dirstate.add(f)
1089 1090 return rejected
1090 1091 finally:
1091 1092 del wlock
1092 1093
1093 1094 def forget(self, list):
1094 1095 wlock = self.wlock()
1095 1096 try:
1096 1097 for f in list:
1097 1098 if self.dirstate[f] != 'a':
1098 1099 self.ui.warn(_("%s not added!\n") % f)
1099 1100 else:
1100 1101 self.dirstate.forget(f)
1101 1102 finally:
1102 1103 del wlock
1103 1104
1104 1105 def remove(self, list, unlink=False):
1105 1106 wlock = None
1106 1107 try:
1107 1108 if unlink:
1108 1109 for f in list:
1109 1110 try:
1110 1111 util.unlink(self.wjoin(f))
1111 1112 except OSError, inst:
1112 1113 if inst.errno != errno.ENOENT:
1113 1114 raise
1114 1115 wlock = self.wlock()
1115 1116 for f in list:
1116 1117 if unlink and os.path.exists(self.wjoin(f)):
1117 1118 self.ui.warn(_("%s still exists!\n") % f)
1118 1119 elif self.dirstate[f] == 'a':
1119 1120 self.dirstate.forget(f)
1120 1121 elif f not in self.dirstate:
1121 1122 self.ui.warn(_("%s not tracked!\n") % f)
1122 1123 else:
1123 1124 self.dirstate.remove(f)
1124 1125 finally:
1125 1126 del wlock
1126 1127
1127 1128 def undelete(self, list):
1128 1129 wlock = None
1129 1130 try:
1130 1131 manifests = [self.manifest.read(self.changelog.read(p)[0])
1131 1132 for p in self.dirstate.parents() if p != nullid]
1132 1133 wlock = self.wlock()
1133 1134 for f in list:
1134 1135 if self.dirstate[f] != 'r':
1135 1136 self.ui.warn(_("%s not removed!\n") % f)
1136 1137 else:
1137 1138 m = f in manifests[0] and manifests[0] or manifests[1]
1138 1139 t = self.file(f).read(m[f])
1139 1140 self.wwrite(f, t, m.flags(f))
1140 1141 self.dirstate.normal(f)
1141 1142 finally:
1142 1143 del wlock
1143 1144
1144 1145 def copy(self, source, dest):
1145 1146 wlock = None
1146 1147 try:
1147 1148 p = self.wjoin(dest)
1148 1149 if not (os.path.exists(p) or os.path.islink(p)):
1149 1150 self.ui.warn(_("%s does not exist!\n") % dest)
1150 1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1151 1152 self.ui.warn(_("copy failed: %s is not a file or a "
1152 1153 "symbolic link\n") % dest)
1153 1154 else:
1154 1155 wlock = self.wlock()
1155 1156 if self.dirstate[dest] in '?r':
1156 1157 self.dirstate.add(dest)
1157 1158 self.dirstate.copy(source, dest)
1158 1159 finally:
1159 1160 del wlock
1160 1161
1161 1162 def heads(self, start=None):
1162 1163 heads = self.changelog.heads(start)
1163 1164 # sort the output in rev descending order
1164 1165 heads = [(-self.changelog.rev(h), h) for h in heads]
1165 1166 return [n for (r, n) in util.sort(heads)]
1166 1167
    def branchheads(self, branch=None, start=None):
        """Return the head nodes of the named branch (default: the
        working directory's branch), optionally restricted to heads
        reachable from start."""
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        # convert revision numbers back to nodes
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1214 1215
1215 1216 def branches(self, nodes):
1216 1217 if not nodes:
1217 1218 nodes = [self.changelog.tip()]
1218 1219 b = []
1219 1220 for n in nodes:
1220 1221 t = n
1221 1222 while 1:
1222 1223 p = self.changelog.parents(n)
1223 1224 if p[1] != nullid or p[0] == nullid:
1224 1225 b.append((t, n, p[0], p[1]))
1225 1226 break
1226 1227 n = p[0]
1227 1228 return b
1228 1229
1229 1230 def between(self, pairs):
1230 1231 r = []
1231 1232
1232 1233 for top, bottom in pairs:
1233 1234 n, l, i = top, [], 0
1234 1235 f = 1
1235 1236
1236 1237 while n != bottom:
1237 1238 p = self.changelog.parents(n)[0]
1238 1239 if i == f:
1239 1240 l.append(n)
1240 1241 f = f * 2
1241 1242 n = p
1242 1243 i += 1
1243 1244
1244 1245 r.append(l)
1245 1246
1246 1247 return r
1247 1248
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # m: local node -> rev map, used as "do we have this node?"
        m = self.changelog.nodemap
        search = []      # branch segments to binary-search later
        fetch = {}       # earliest unknown nodes (the result)
        seen = {}        # branch heads already examined
        seenbranch = {}  # full branch tuples already scheduled
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)  # nodes already requested from remote
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known: the branch root is the
                            # earliest unknown changeset of this segment
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next request round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask the remote in batches of 10 heads
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        search = [(t, b) for (t, b, p1, p2) in search]
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            # gap of at most two: p is the first unknown
                            self.ui.debug(_("found new branch changeset %s\n") %
                                              short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            # narrow the range and search again
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # no common ancestor besides the null revision
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1390 1391
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node information supplied: compute it via a
            # discovery round against the remote
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start with every local node as a candidate
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1438 1439
1439 1440 def pull(self, remote, heads=None, force=False):
1440 1441 lock = self.lock()
1441 1442 try:
1442 1443 fetch = self.findincoming(remote, heads=heads, force=force)
1443 1444 if fetch == [nullid]:
1444 1445 self.ui.status(_("requesting all changes\n"))
1445 1446
1446 1447 if not fetch:
1447 1448 self.ui.status(_("no changes found\n"))
1448 1449 return 0
1449 1450
1450 1451 if heads is None:
1451 1452 cg = remote.changegroup(fetch, 'pull')
1452 1453 else:
1453 1454 if 'changegroupsubset' not in remote.capabilities:
1454 1455 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1455 1456 cg = remote.changegroupsubset(fetch, heads, 'pull')
1456 1457 return self.addchangegroup(cg, 'pull', remote.url())
1457 1458 finally:
1458 1459 del lock
1459 1460
1460 1461 def push(self, remote, force=False, revs=None):
1461 1462 # there are two ways to push to remote repo:
1462 1463 #
1463 1464 # addchangegroup assumes local user can lock remote
1464 1465 # repo (local filesystem, old ssh servers).
1465 1466 #
1466 1467 # unbundle assumes local user cannot lock remote repo (new ssh
1467 1468 # servers, http servers).
1468 1469
1469 1470 if remote.capable('unbundle'):
1470 1471 return self.push_unbundle(remote, force, revs)
1471 1472 return self.push_addchangegroup(remote, force, revs)
1472 1473
    def prepush(self, remote, force, revs):
        """Prepare a push: compute the outgoing changegroup and check
        that the push would not create new remote heads.

        Returns (changegroup, remote_heads) on success, (None, 1) when
        there is nothing to push, or (None, 0) when the push was
        refused because it would add remote heads.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is non-empty when the remote has changes we don't
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # msng_cl (the missing changesets) is not used here
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty: any heads are fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head
                        # unless an outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1528 1529
1529 1530 def push_addchangegroup(self, remote, force, revs):
1530 1531 lock = remote.lock()
1531 1532 try:
1532 1533 ret = self.prepush(remote, force, revs)
1533 1534 if ret[0] is not None:
1534 1535 cg, remote_heads = ret
1535 1536 return remote.addchangegroup(cg, 'push', self.url())
1536 1537 return ret[1]
1537 1538 finally:
1538 1539 del lock
1539 1540
1540 1541 def push_unbundle(self, remote, force, revs):
1541 1542 # local repo finds heads on server, finds out what revs it
1542 1543 # must push. once revs transferred, if server finds it has
1543 1544 # different heads (someone else won commit/push race), server
1544 1545 # aborts.
1545 1546
1546 1547 ret = self.prepush(remote, force, revs)
1547 1548 if ret[0] is not None:
1548 1549 cg, remote_heads = ret
1549 1550 if force: remote_heads = ['force']
1550 1551 return remote.unbundle(cg, remote_heads, 'push')
1551 1552 return ret[1]
1552 1553
1553 1554 def changegroupinfo(self, nodes, source):
1554 1555 if self.ui.verbose or source == 'bundle':
1555 1556 self.ui.status(_("%d changesets found\n") % len(nodes))
1556 1557 if self.ui.debugflag:
1557 1558 self.ui.debug(_("List of changesets:\n"))
1558 1559 for node in nodes:
1559 1560 self.ui.debug("%s\n" % hex(node))
1560 1561
1561 1562 def changegroupsubset(self, bases, heads, source, extranodes=None):
1562 1563 """This function generates a changegroup consisting of all the nodes
1563 1564 that are descendents of any of the bases, and ancestors of any of
1564 1565 the heads.
1565 1566
1566 1567 It is fairly complex as determining which filenodes and which
1567 1568 manifest nodes need to be included for the changeset to be complete
1568 1569 is non-trivial.
1569 1570
1570 1571 Another wrinkle is doing the reverse, figuring out which changeset in
1571 1572 the changegroup a particular filenode or manifestnode belongs to.
1572 1573
1573 1574 The caller can specify some nodes that must be included in the
1574 1575 changegroup using the extranodes argument. It should be a dict
1575 1576 where the keys are the filenames (or 1 for the manifest), and the
1576 1577 values are lists of (node, linknode) tuples, where node is a wanted
1577 1578 node and linknode is the changelog node that should be transmitted as
1578 1579 the linkrev.
1579 1580 """
1580 1581
1581 1582 if extranodes is None:
1582 1583 # can we go through the fast path ?
1583 1584 heads.sort()
1584 1585 allheads = self.heads()
1585 1586 allheads.sort()
1586 1587 if heads == allheads:
1587 1588 common = []
1588 1589 # parents of bases are known from both sides
1589 1590 for n in bases:
1590 1591 for p in self.changelog.parents(n):
1591 1592 if p != nullid:
1592 1593 common.append(p)
1593 1594 return self._changegroup(common, source)
1594 1595
1595 1596 self.hook('preoutgoing', throw=True, source=source)
1596 1597
1597 1598 # Set up some initial variables
1598 1599 # Make it easy to refer to self.changelog
1599 1600 cl = self.changelog
1600 1601 # msng is short for missing - compute the list of changesets in this
1601 1602 # changegroup.
1602 1603 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1603 1604 self.changegroupinfo(msng_cl_lst, source)
1604 1605 # Some bases may turn out to be superfluous, and some heads may be
1605 1606 # too. nodesbetween will return the minimal set of bases and heads
1606 1607 # necessary to re-create the changegroup.
1607 1608
1608 1609 # Known heads are the list of heads that it is assumed the recipient
1609 1610 # of this changegroup will know about.
1610 1611 knownheads = {}
1611 1612 # We assume that all parents of bases are known heads.
1612 1613 for n in bases:
1613 1614 for p in cl.parents(n):
1614 1615 if p != nullid:
1615 1616 knownheads[p] = 1
1616 1617 knownheads = knownheads.keys()
1617 1618 if knownheads:
1618 1619 # Now that we know what heads are known, we can compute which
1619 1620 # changesets are known. The recipient must know about all
1620 1621 # changesets required to reach the known heads from the null
1621 1622 # changeset.
1622 1623 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1623 1624 junk = None
1624 1625 # Transform the list into an ersatz set.
1625 1626 has_cl_set = dict.fromkeys(has_cl_set)
1626 1627 else:
1627 1628 # If there were no known heads, the recipient cannot be assumed to
1628 1629 # know about any changesets.
1629 1630 has_cl_set = {}
1630 1631
1631 1632 # Make it easy to refer to self.manifest
1632 1633 mnfst = self.manifest
1633 1634 # We don't know which manifests are missing yet
1634 1635 msng_mnfst_set = {}
1635 1636 # Nor do we know which filenodes are missing.
1636 1637 msng_filenode_set = {}
1637 1638
1638 1639 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1639 1640 junk = None
1640 1641
1641 1642 # A changeset always belongs to itself, so the changenode lookup
1642 1643 # function for a changenode is identity.
1643 1644 def identity(x):
1644 1645 return x
1645 1646
1646 1647 # A function generating function. Sets up an environment for the
1647 1648 # inner function.
1648 1649 def cmp_by_rev_func(revlog):
1649 1650 # Compare two nodes by their revision number in the environment's
1650 1651 # revision history. Since the revision number both represents the
1651 1652 # most efficient order to read the nodes in, and represents a
1652 1653 # topological sorting of the nodes, this function is often useful.
1653 1654 def cmp_by_rev(a, b):
1654 1655 return cmp(revlog.rev(a), revlog.rev(b))
1655 1656 return cmp_by_rev
1656 1657
1657 1658 # If we determine that a particular file or manifest node must be a
1658 1659 # node that the recipient of the changegroup will already have, we can
1659 1660 # also assume the recipient will have all the parents. This function
1660 1661 # prunes them from the set of missing nodes.
1661 1662 def prune_parents(revlog, hasset, msngset):
1662 1663 haslst = hasset.keys()
1663 1664 haslst.sort(cmp_by_rev_func(revlog))
1664 1665 for node in haslst:
1665 1666 parentlst = [p for p in revlog.parents(node) if p != nullid]
1666 1667 while parentlst:
1667 1668 n = parentlst.pop()
1668 1669 if n not in hasset:
1669 1670 hasset[n] = 1
1670 1671 p = [p for p in revlog.parents(n) if p != nullid]
1671 1672 parentlst.extend(p)
1672 1673 for n in hasset:
1673 1674 msngset.pop(n, None)
1674 1675
1675 1676 # This is a function generating function used to set up an environment
1676 1677 # for the inner function to execute in.
1677 1678 def manifest_and_file_collector(changedfileset):
1678 1679 # This is an information gathering function that gathers
1679 1680 # information from each changeset node that goes out as part of
1680 1681 # the changegroup. The information gathered is a list of which
1681 1682 # manifest nodes are potentially required (the recipient may
1682 1683 # already have them) and total list of all files which were
1683 1684 # changed in any changeset in the changegroup.
1684 1685 #
1685 1686 # We also remember the first changenode we saw any manifest
1686 1687 # referenced by so we can later determine which changenode 'owns'
1687 1688 # the manifest.
1688 1689 def collect_manifests_and_files(clnode):
1689 1690 c = cl.read(clnode)
1690 1691 for f in c[3]:
1691 1692 # This is to make sure we only have one instance of each
1692 1693 # filename string for each filename.
1693 1694 changedfileset.setdefault(f, f)
1694 1695 msng_mnfst_set.setdefault(c[0], clnode)
1695 1696 return collect_manifests_and_files
1696 1697
1697 1698 # Figure out which manifest nodes (of the ones we think might be part
1698 1699 # of the changegroup) the recipient must know about and remove them
1699 1700 # from the changegroup.
1700 1701 def prune_manifests():
1701 1702 has_mnfst_set = {}
1702 1703 for n in msng_mnfst_set:
1703 1704 # If a 'missing' manifest thinks it belongs to a changenode
1704 1705 # the recipient is assumed to have, obviously the recipient
1705 1706 # must have that manifest.
1706 1707 linknode = cl.node(mnfst.linkrev(n))
1707 1708 if linknode in has_cl_set:
1708 1709 has_mnfst_set[n] = 1
1709 1710 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1710 1711
1711 1712 # Use the information collected in collect_manifests_and_files to say
1712 1713 # which changenode any manifestnode belongs to.
1713 1714 def lookup_manifest_link(mnfstnode):
1714 1715 return msng_mnfst_set[mnfstnode]
1715 1716
1716 1717 # A function generating function that sets up the initial environment
1717 1718 # the inner function.
1718 1719 def filenode_collector(changedfiles):
1719 1720 next_rev = [0]
1720 1721 # This gathers information from each manifestnode included in the
1721 1722 # changegroup about which filenodes the manifest node references
1722 1723 # so we can include those in the changegroup too.
1723 1724 #
1724 1725 # It also remembers which changenode each filenode belongs to. It
1725 1726 # does this by assuming the a filenode belongs to the changenode
1726 1727 # the first manifest that references it belongs to.
1727 1728 def collect_msng_filenodes(mnfstnode):
1728 1729 r = mnfst.rev(mnfstnode)
1729 1730 if r == next_rev[0]:
1730 1731 # If the last rev we looked at was the one just previous,
1731 1732 # we only need to see a diff.
1732 1733 deltamf = mnfst.readdelta(mnfstnode)
1733 1734 # For each line in the delta
1734 1735 for f, fnode in deltamf.items():
1735 1736 f = changedfiles.get(f, None)
1736 1737 # And if the file is in the list of files we care
1737 1738 # about.
1738 1739 if f is not None:
1739 1740 # Get the changenode this manifest belongs to
1740 1741 clnode = msng_mnfst_set[mnfstnode]
1741 1742 # Create the set of filenodes for the file if
1742 1743 # there isn't one already.
1743 1744 ndset = msng_filenode_set.setdefault(f, {})
1744 1745 # And set the filenode's changelog node to the
1745 1746 # manifest's if it hasn't been set already.
1746 1747 ndset.setdefault(fnode, clnode)
1747 1748 else:
1748 1749 # Otherwise we need a full manifest.
1749 1750 m = mnfst.read(mnfstnode)
1750 1751 # For every file in we care about.
1751 1752 for f in changedfiles:
1752 1753 fnode = m.get(f, None)
1753 1754 # If it's in the manifest
1754 1755 if fnode is not None:
1755 1756 # See comments above.
1756 1757 clnode = msng_mnfst_set[mnfstnode]
1757 1758 ndset = msng_filenode_set.setdefault(f, {})
1758 1759 ndset.setdefault(fnode, clnode)
1759 1760 # Remember the revision we hope to see next.
1760 1761 next_rev[0] = r + 1
1761 1762 return collect_msng_filenodes
1762 1763
1763 1764 # We have a list of filenodes we think we need for a file, lets remove
1764 1765 # all those we now the recipient must have.
1765 1766 def prune_filenodes(f, filerevlog):
1766 1767 msngset = msng_filenode_set[f]
1767 1768 hasset = {}
1768 1769 # If a 'missing' filenode thinks it belongs to a changenode we
1769 1770 # assume the recipient must have, then the recipient must have
1770 1771 # that filenode.
1771 1772 for n in msngset:
1772 1773 clnode = cl.node(filerevlog.linkrev(n))
1773 1774 if clnode in has_cl_set:
1774 1775 hasset[n] = 1
1775 1776 prune_parents(filerevlog, hasset, msngset)
1776 1777
1777 1778 # A function generator function that sets up the a context for the
1778 1779 # inner function.
1779 1780 def lookup_filenode_link_func(fname):
1780 1781 msngset = msng_filenode_set[fname]
1781 1782 # Lookup the changenode the filenode belongs to.
1782 1783 def lookup_filenode_link(fnode):
1783 1784 return msngset[fnode]
1784 1785 return lookup_filenode_link
1785 1786
1786 1787 # Add the nodes that were explicitly requested.
1787 1788 def add_extra_nodes(name, nodes):
1788 1789 if not extranodes or name not in extranodes:
1789 1790 return
1790 1791
1791 1792 for node, linknode in extranodes[name]:
1792 1793 if node not in nodes:
1793 1794 nodes[node] = linknode
1794 1795
1795 1796 # Now that we have all theses utility functions to help out and
1796 1797 # logically divide up the task, generate the group.
1797 1798 def gengroup():
1798 1799 # The set of changed files starts empty.
1799 1800 changedfiles = {}
1800 1801 # Create a changenode group generator that will call our functions
1801 1802 # back to lookup the owning changenode and collect information.
1802 1803 group = cl.group(msng_cl_lst, identity,
1803 1804 manifest_and_file_collector(changedfiles))
1804 1805 for chnk in group:
1805 1806 yield chnk
1806 1807
1807 1808 # The list of manifests has been collected by the generator
1808 1809 # calling our functions back.
1809 1810 prune_manifests()
1810 1811 add_extra_nodes(1, msng_mnfst_set)
1811 1812 msng_mnfst_lst = msng_mnfst_set.keys()
1812 1813 # Sort the manifestnodes by revision number.
1813 1814 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1814 1815 # Create a generator for the manifestnodes that calls our lookup
1815 1816 # and data collection functions back.
1816 1817 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1817 1818 filenode_collector(changedfiles))
1818 1819 for chnk in group:
1819 1820 yield chnk
1820 1821
1821 1822 # These are no longer needed, dereference and toss the memory for
1822 1823 # them.
1823 1824 msng_mnfst_lst = None
1824 1825 msng_mnfst_set.clear()
1825 1826
1826 1827 if extranodes:
1827 1828 for fname in extranodes:
1828 1829 if isinstance(fname, int):
1829 1830 continue
1830 1831 msng_filenode_set.setdefault(fname, {})
1831 1832 changedfiles[fname] = 1
1832 1833 # Go through all our files in order sorted by name.
1833 1834 for fname in util.sort(changedfiles):
1834 1835 filerevlog = self.file(fname)
1835 1836 if not len(filerevlog):
1836 1837 raise util.Abort(_("empty or missing revlog for %s") % fname)
1837 1838 # Toss out the filenodes that the recipient isn't really
1838 1839 # missing.
1839 1840 if fname in msng_filenode_set:
1840 1841 prune_filenodes(fname, filerevlog)
1841 1842 add_extra_nodes(fname, msng_filenode_set[fname])
1842 1843 msng_filenode_lst = msng_filenode_set[fname].keys()
1843 1844 else:
1844 1845 msng_filenode_lst = []
1845 1846 # If any filenodes are left, generate the group for them,
1846 1847 # otherwise don't bother.
1847 1848 if len(msng_filenode_lst) > 0:
1848 1849 yield changegroup.chunkheader(len(fname))
1849 1850 yield fname
1850 1851 # Sort the filenodes by their revision #
1851 1852 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1852 1853 # Create a group generator and only pass in a changenode
1853 1854 # lookup function as we need to collect no information
1854 1855 # from filenodes.
1855 1856 group = filerevlog.group(msng_filenode_lst,
1856 1857 lookup_filenode_link_func(fname))
1857 1858 for chnk in group:
1858 1859 yield chnk
1859 1860 if fname in msng_filenode_set:
1860 1861 # Don't need this anymore, toss it to free memory.
1861 1862 del msng_filenode_set[fname]
1862 1863 # Signal that no more groups are left.
1863 1864 yield changegroup.closechunk()
1864 1865
1865 1866 if msng_cl_lst:
1866 1867 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1867 1868
1868 1869 return util.chunkbuffer(gengroup())
1869 1870
1870 1871 def changegroup(self, basenodes, source):
1871 1872 # to avoid a race we use changegroupsubset() (issue1320)
1872 1873 return self.changegroupsubset(basenodes, self.heads(), source)
1873 1874
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # nodes on our side that the remote end does not have yet
        nodes = cl.findmissing(common)
        # changelog revision numbers being transferred; used below to decide
        # which manifest/file nodes belong in this changegroup
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog chunks are their own link nodes, so lookup is identity
            return x

        def gennodelst(log):
            # yield the nodes of *log* whose linked changeset is outgoing
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback invoked per changelog node: record every
            # file touched by the changeset so we know which filelogs to send
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of *revlog* back to the changelog node that
            # introduced it
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks come first; changed files are collected
            # as a side effect of streaming them
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, in sorted name order
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # each file group is preceded by the file name itself
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # an empty chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1943 1944
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # per-changeset callback: log progress and return the revision
            # number the incoming changeset will be stored at
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # map a changelog node to its local revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # a weak proxy keeps the transaction collectable once the
            # enclosing 'del tr' runs (see aftertrans)
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog tip revision before/after the import
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is preceded by the file name; an empty
                # chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may veto the whole transaction by raising
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # drop our reference so the transaction destructor runs (and
            # rolls back if tr.close() was never reached)
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2046 2047
2047 2048
    def stream_in(self, remote):
        """Clone by copying raw store files streamed from *remote*.

        Wire format read here: one integer status line, then a line
        "<total_files> <total_bytes>", then for each file a header line
        "<name>\\0<size>" followed by exactly *size* bytes of file data.
        Returns len(self.heads()) + 1 (always non-zero on success).
        """
        fp = remote.stream_out()
        l = fp.readline()
        # first line is a status code: 0 ok, 1 forbidden, 2 lock failed
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # write straight into the repository store
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # guard the rate computation against a zero/negative duration
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        # in-memory state is now stale; force a reload from disk
        self.invalidate()
        return len(self.heads()) + 1
2094 2095
2095 2096 def clone(self, remote, heads=[], stream=False):
2096 2097 '''clone remote repository.
2097 2098
2098 2099 keyword arguments:
2099 2100 heads: list of revs to clone (forces use of pull)
2100 2101 stream: use streaming clone if possible'''
2101 2102
2102 2103 # now, all clients that can request uncompressed clones can
2103 2104 # read repo formats supported by all servers that can serve
2104 2105 # them.
2105 2106
2106 2107 # if revlog format changes, client will have to check version
2107 2108 # and format flags on "stream" capability, and use
2108 2109 # uncompressed only if compatible.
2109 2110
2110 2111 if stream and not heads and remote.capable('stream'):
2111 2112 return self.stream_in(remote)
2112 2113 return self.pull(remote, heads)
2113 2114
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into fresh tuples up front, so the returned
    closure keeps no reference back to the caller's structures and
    does not create reference cycles.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for source, destination in pending:
            util.rename(source, destination)
    return run_renames
2121 2122
def instance(ui, path, create):
    """Open (or create, if *create* is true) the local repository at *path*.

    An optional 'file' URL scheme prefix is stripped before the path is
    handed to localrepository.
    """
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2124 2125
def islocal(path):
    """Report whether *path* names a local repository.

    For this repository class the answer is unconditionally True; the
    argument is accepted only for interface compatibility.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now