Print less scary warning when invalidating the branch cache.
Thomas Arendsen Hein
r6056:0ad2ffbf default
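The change below is a one-line rewording in _readbranchcache (line 372 of the diff): when the cached tip no longer matches the changelog, the cache is discarded by raising ValueError, and the exception text is only ever shown as a debug message, so it should read as routine cache invalidation rather than an error. A minimal standalone sketch of that validation pattern, using hypothetical names (load_branch_cache, node_at) that are not Mercurial's API:

def load_branch_cache(lines, count, node_at):
    # lines: the cache file split into lines; count: number of changelog
    # revisions; node_at(rev): node hash recorded at that revision
    partial = {}
    try:
        last, lrev = lines[0].split(" ", 1)
        lrev = int(lrev)
        if not (lrev < count and node_at(lrev) == last):
            # the recorded tip no longer matches history (e.g. after a
            # rollback or strip); rebuild the cache from scratch
            raise ValueError('invalidating branch cache (tip differs)')
        for l in lines[1:]:
            if not l:
                continue
            node, label = l.split(" ", 1)
            partial[label.strip()] = node
    except ValueError:
        partial = {}
    return partial

# A cache recorded at rev 1 of a 3-revision repo whose rev-1 node matches:
print load_branch_cache(["bbb 1", "aaa default"], 3, {1: "bbb"}.get)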
@@ -1,2081 +1,2081 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71 self.sopener = util.encodedopener(util.opener(self.spath),
72 72 self.encodefn)
73 73
74 74 self.ui = ui.ui(parentui=parentui)
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self._tagstypecache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.filterpats = {}
86 86 self._datafilters = {}
87 87 self._transref = self._lockref = self._wlockref = None
88 88
89 89 def __getattr__(self, name):
90 90 if name == 'changelog':
91 91 self.changelog = changelog.changelog(self.sopener)
92 92 self.sopener.defversion = self.changelog.version
93 93 return self.changelog
94 94 if name == 'manifest':
95 95 self.changelog # force changelog to load first (sets sopener.defversion)
96 96 self.manifest = manifest.manifest(self.sopener)
97 97 return self.manifest
98 98 if name == 'dirstate':
99 99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 100 return self.dirstate
101 101 else:
102 102 raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
107 107 def hook(self, name, throw=False, **args):
108 108 return hook.hook(self.ui, self, name, throw, **args)
109 109
110 110 tag_disallowed = ':\r\n'
111 111
112 112 def _tag(self, name, node, message, local, user, date, parent=None,
113 113 extra={}):
114 114 use_dirstate = parent is None
115 115
116 116 for c in self.tag_disallowed:
117 117 if c in name:
118 118 raise util.Abort(_('%r cannot be used in a tag name') % c)
119 119
120 120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
121 121
122 122 def writetag(fp, name, munge, prevtags):
123 123 fp.seek(0, 2)
124 124 if prevtags and prevtags[-1] != '\n':
125 125 fp.write('\n')
126 126 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
127 127 fp.close()
128 128
129 129 prevtags = ''
130 130 if local:
131 131 try:
132 132 fp = self.opener('localtags', 'r+')
133 133 except IOError, err:
134 134 fp = self.opener('localtags', 'a')
135 135 else:
136 136 prevtags = fp.read()
137 137
138 138 # local tags are stored in the current charset
139 139 writetag(fp, name, None, prevtags)
140 140 self.hook('tag', node=hex(node), tag=name, local=local)
141 141 return
142 142
143 143 if use_dirstate:
144 144 try:
145 145 fp = self.wfile('.hgtags', 'rb+')
146 146 except IOError, err:
147 147 fp = self.wfile('.hgtags', 'ab')
148 148 else:
149 149 prevtags = fp.read()
150 150 else:
151 151 try:
152 152 prevtags = self.filectx('.hgtags', parent).data()
153 153 except revlog.LookupError:
154 154 pass
155 155 fp = self.wfile('.hgtags', 'wb')
156 156 if prevtags:
157 157 fp.write(prevtags)
158 158
159 159 # committed tags are stored in UTF-8
160 160 writetag(fp, name, util.fromlocal, prevtags)
161 161
162 162 if use_dirstate and '.hgtags' not in self.dirstate:
163 163 self.add(['.hgtags'])
164 164
165 165 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
166 166 extra=extra)
167 167
168 168 self.hook('tag', node=hex(node), tag=name, local=local)
169 169
170 170 return tagnode
171 171
172 172 def tag(self, name, node, message, local, user, date):
173 173 '''tag a revision with a symbolic name.
174 174
175 175 if local is True, the tag is stored in a per-repository file.
176 176 otherwise, it is stored in the .hgtags file, and a new
177 177 changeset is committed with the change.
178 178
179 179 keyword arguments:
180 180
181 181 local: whether to store tag in non-version-controlled file
182 182 (default False)
183 183
184 184 message: commit message to use if committing
185 185
186 186 user: name of user to use if committing
187 187
188 188 date: date tuple to use if committing'''
189 189
190 190 for x in self.status()[:5]:
191 191 if '.hgtags' in x:
192 192 raise util.Abort(_('working copy of .hgtags is changed '
193 193 '(please commit .hgtags manually)'))
194 194
195 195
196 196 self._tag(name, node, message, local, user, date)
197 197
198 198 def tags(self):
199 199 '''return a mapping of tag to node'''
200 200 if self.tagscache:
201 201 return self.tagscache
202 202
203 203 globaltags = {}
204 204 tagtypes = {}
205 205
206 206 def readtags(lines, fn, tagtype):
207 207 filetags = {}
208 208 count = 0
209 209
210 210 def warn(msg):
211 211 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
212 212
213 213 for l in lines:
214 214 count += 1
215 215 if not l:
216 216 continue
217 217 s = l.split(" ", 1)
218 218 if len(s) != 2:
219 219 warn(_("cannot parse entry"))
220 220 continue
221 221 node, key = s
222 222 key = util.tolocal(key.strip()) # stored in UTF-8
223 223 try:
224 224 bin_n = bin(node)
225 225 except TypeError:
226 226 warn(_("node '%s' is not well formed") % node)
227 227 continue
228 228 if bin_n not in self.changelog.nodemap:
229 229 warn(_("tag '%s' refers to unknown node") % key)
230 230 continue
231 231
232 232 h = []
233 233 if key in filetags:
234 234 n, h = filetags[key]
235 235 h.append(n)
236 236 filetags[key] = (bin_n, h)
237 237
238 238 for k, nh in filetags.items():
239 239 if k not in globaltags:
240 240 globaltags[k] = nh
241 241 tagtypes[k] = tagtype
242 242 continue
243 243
244 244 # we prefer the global tag if:
245 245 # it supersedes us OR
246 246 # mutual supersedes and it has a higher rank
247 247 # otherwise we win because we're tip-most
248 248 an, ah = nh
249 249 bn, bh = globaltags[k]
250 250 if (bn != an and an in bh and
251 251 (bn not in ah or len(bh) > len(ah))):
252 252 an = bn
253 253 ah.extend([n for n in bh if n not in ah])
254 254 globaltags[k] = an, ah
255 255 tagtypes[k] = tagtype
256 256
257 257 # read the tags file from each head, ending with the tip
258 258 f = None
259 259 for rev, node, fnode in self._hgtagsnodes():
260 260 f = (f and f.filectx(fnode) or
261 261 self.filectx('.hgtags', fileid=fnode))
262 262 readtags(f.data().splitlines(), f, "global")
263 263
264 264 try:
265 265 data = util.fromlocal(self.opener("localtags").read())
266 266 # localtags are stored in the local character set
267 267 # while the internal tag table is stored in UTF-8
268 268 readtags(data.splitlines(), "localtags", "local")
269 269 except IOError:
270 270 pass
271 271
272 272 self.tagscache = {}
273 273 self._tagstypecache = {}
274 274 for k,nh in globaltags.items():
275 275 n = nh[0]
276 276 if n != nullid:
277 277 self.tagscache[k] = n
278 278 self._tagstypecache[k] = tagtypes[k]
279 279 self.tagscache['tip'] = self.changelog.tip()
280 280
281 281 return self.tagscache
282 282
283 283 def tagtype(self, tagname):
284 284 '''
285 285 return the type of the given tag. result can be:
286 286
287 287 'local' : a local tag
288 288 'global' : a global tag
289 289 None : tag does not exist
290 290 '''
291 291
292 292 self.tags()
293 293
294 294 return self._tagstypecache.get(tagname)
295 295
296 296 def _hgtagsnodes(self):
297 297 heads = self.heads()
298 298 heads.reverse()
299 299 last = {}
300 300 ret = []
301 301 for node in heads:
302 302 c = self.changectx(node)
303 303 rev = c.rev()
304 304 try:
305 305 fnode = c.filenode('.hgtags')
306 306 except revlog.LookupError:
307 307 continue
308 308 ret.append((rev, node, fnode))
309 309 if fnode in last:
310 310 ret[last[fnode]] = None
311 311 last[fnode] = len(ret) - 1
312 312 return [item for item in ret if item]
313 313
314 314 def tagslist(self):
315 315 '''return a list of tags ordered by revision'''
316 316 l = []
317 317 for t, n in self.tags().items():
318 318 try:
319 319 r = self.changelog.rev(n)
320 320 except:
321 321 r = -2 # sort to the beginning of the list if unknown
322 322 l.append((r, t, n))
323 323 l.sort()
324 324 return [(t, n) for r, t, n in l]
325 325
326 326 def nodetags(self, node):
327 327 '''return the tags associated with a node'''
328 328 if not self.nodetagscache:
329 329 self.nodetagscache = {}
330 330 for t, n in self.tags().items():
331 331 self.nodetagscache.setdefault(n, []).append(t)
332 332 return self.nodetagscache.get(node, [])
333 333
334 334 def _branchtags(self):
335 335 partial, last, lrev = self._readbranchcache()
336 336
337 337 tiprev = self.changelog.count() - 1
338 338 if lrev != tiprev:
339 339 self._updatebranchcache(partial, lrev+1, tiprev+1)
340 340 self._writebranchcache(partial, self.changelog.tip(), tiprev)
341 341
342 342 return partial
343 343
344 344 def branchtags(self):
345 345 if self.branchcache is not None:
346 346 return self.branchcache
347 347
348 348 self.branchcache = {} # avoid recursion in changectx
349 349 partial = self._branchtags()
350 350
351 351 # the branch cache is stored on disk as UTF-8, but in the local
352 352 # charset internally
353 353 for k, v in partial.items():
354 354 self.branchcache[util.tolocal(k)] = v
355 355 return self.branchcache
356 356
357 357 def _readbranchcache(self):
358 358 partial = {}
359 359 try:
360 360 f = self.opener("branch.cache")
361 361 lines = f.read().split('\n')
362 362 f.close()
363 363 except (IOError, OSError):
364 364 return {}, nullid, nullrev
365 365
366 366 try:
367 367 last, lrev = lines.pop(0).split(" ", 1)
368 368 last, lrev = bin(last), int(lrev)
369 369 if not (lrev < self.changelog.count() and
370 370 self.changelog.node(lrev) == last): # sanity check
371 371 # invalidate the cache
372   - raise ValueError('Invalid branch cache: unknown tip')
  372 + raise ValueError('invalidating branch cache (tip differs)')
373 373 for l in lines:
374 374 if not l: continue
375 375 node, label = l.split(" ", 1)
376 376 partial[label.strip()] = bin(node)
377 377 except (KeyboardInterrupt, util.SignalInterrupt):
378 378 raise
379 379 except Exception, inst:
380 380 if self.ui.debugflag:
381 381 self.ui.warn(str(inst), '\n')
382 382 partial, last, lrev = {}, nullid, nullrev
383 383 return partial, last, lrev
384 384
385 385 def _writebranchcache(self, branches, tip, tiprev):
386 386 try:
387 387 f = self.opener("branch.cache", "w", atomictemp=True)
388 388 f.write("%s %s\n" % (hex(tip), tiprev))
389 389 for label, node in branches.iteritems():
390 390 f.write("%s %s\n" % (hex(node), label))
391 391 f.rename()
392 392 except (IOError, OSError):
393 393 pass
394 394
395 395 def _updatebranchcache(self, partial, start, end):
396 396 for r in xrange(start, end):
397 397 c = self.changectx(r)
398 398 b = c.branch()
399 399 partial[b] = c.node()
400 400
401 401 def lookup(self, key):
402 402 if key == '.':
403 403 key, second = self.dirstate.parents()
404 404 if key == nullid:
405 405 raise repo.RepoError(_("no revision checked out"))
406 406 if second != nullid:
407 407 self.ui.warn(_("warning: working directory has two parents, "
408 408 "tag '.' uses the first\n"))
409 409 elif key == 'null':
410 410 return nullid
411 411 n = self.changelog._match(key)
412 412 if n:
413 413 return n
414 414 if key in self.tags():
415 415 return self.tags()[key]
416 416 if key in self.branchtags():
417 417 return self.branchtags()[key]
418 418 n = self.changelog._partialmatch(key)
419 419 if n:
420 420 return n
421 421 try:
422 422 if len(key) == 20:
423 423 key = hex(key)
424 424 except:
425 425 pass
426 426 raise repo.RepoError(_("unknown revision '%s'") % key)
427 427
428 428 def dev(self):
429 429 return os.lstat(self.path).st_dev
430 430
431 431 def local(self):
432 432 return True
433 433
434 434 def join(self, f):
435 435 return os.path.join(self.path, f)
436 436
437 437 def sjoin(self, f):
438 438 f = self.encodefn(f)
439 439 return os.path.join(self.spath, f)
440 440
441 441 def wjoin(self, f):
442 442 return os.path.join(self.root, f)
443 443
444 444 def file(self, f):
445 445 if f[0] == '/':
446 446 f = f[1:]
447 447 return filelog.filelog(self.sopener, f)
448 448
449 449 def changectx(self, changeid=None):
450 450 return context.changectx(self, changeid)
451 451
452 452 def workingctx(self):
453 453 return context.workingctx(self)
454 454
455 455 def parents(self, changeid=None):
456 456 '''
457 457 get list of changectxs for parents of changeid or working directory
458 458 '''
459 459 if changeid is None:
460 460 pl = self.dirstate.parents()
461 461 else:
462 462 n = self.changelog.lookup(changeid)
463 463 pl = self.changelog.parents(n)
464 464 if pl[1] == nullid:
465 465 return [self.changectx(pl[0])]
466 466 return [self.changectx(pl[0]), self.changectx(pl[1])]
467 467
468 468 def filectx(self, path, changeid=None, fileid=None):
469 469 """changeid can be a changeset revision, node, or tag.
470 470 fileid can be a file revision or node."""
471 471 return context.filectx(self, path, changeid, fileid)
472 472
473 473 def getcwd(self):
474 474 return self.dirstate.getcwd()
475 475
476 476 def pathto(self, f, cwd=None):
477 477 return self.dirstate.pathto(f, cwd)
478 478
479 479 def wfile(self, f, mode='r'):
480 480 return self.wopener(f, mode)
481 481
482 482 def _link(self, f):
483 483 return os.path.islink(self.wjoin(f))
484 484
485 485 def _filter(self, filter, filename, data):
486 486 if filter not in self.filterpats:
487 487 l = []
488 488 for pat, cmd in self.ui.configitems(filter):
489 489 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 490 fn = None
491 491 for name, filterfn in self._datafilters.iteritems():
492 492 if cmd.startswith(name):
493 493 fn = filterfn
494 494 break
495 495 if not fn:
496 496 fn = lambda s, c, **kwargs: util.filter(s, c)
497 497 # Wrap old filters not supporting keyword arguments
498 498 if not inspect.getargspec(fn)[2]:
499 499 oldfn = fn
500 500 fn = lambda s, c, **kwargs: oldfn(s, c)
501 501 l.append((mf, fn, cmd))
502 502 self.filterpats[filter] = l
503 503
504 504 for mf, fn, cmd in self.filterpats[filter]:
505 505 if mf(filename):
506 506 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
507 507 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
508 508 break
509 509
510 510 return data
511 511
512 512 def adddatafilter(self, name, filter):
513 513 self._datafilters[name] = filter
514 514
515 515 def wread(self, filename):
516 516 if self._link(filename):
517 517 data = os.readlink(self.wjoin(filename))
518 518 else:
519 519 data = self.wopener(filename, 'r').read()
520 520 return self._filter("encode", filename, data)
521 521
522 522 def wwrite(self, filename, data, flags):
523 523 data = self._filter("decode", filename, data)
524 524 try:
525 525 os.unlink(self.wjoin(filename))
526 526 except OSError:
527 527 pass
528 528 self.wopener(filename, 'w').write(data)
529 529 util.set_flags(self.wjoin(filename), flags)
530 530
531 531 def wwritedata(self, filename, data):
532 532 return self._filter("decode", filename, data)
533 533
534 534 def transaction(self):
535 535 if self._transref and self._transref():
536 536 return self._transref().nest()
537 537
538 538 # abort here if the journal already exists
539 539 if os.path.exists(self.sjoin("journal")):
540 540 raise repo.RepoError(_("journal already exists - run hg recover"))
541 541
542 542 # save dirstate for rollback
543 543 try:
544 544 ds = self.opener("dirstate").read()
545 545 except IOError:
546 546 ds = ""
547 547 self.opener("journal.dirstate", "w").write(ds)
548 548 self.opener("journal.branch", "w").write(self.dirstate.branch())
549 549
550 550 renames = [(self.sjoin("journal"), self.sjoin("undo")),
551 551 (self.join("journal.dirstate"), self.join("undo.dirstate")),
552 552 (self.join("journal.branch"), self.join("undo.branch"))]
553 553 tr = transaction.transaction(self.ui.warn, self.sopener,
554 554 self.sjoin("journal"),
555 555 aftertrans(renames))
556 556 self._transref = weakref.ref(tr)
557 557 return tr
558 558
559 559 def recover(self):
560 560 l = self.lock()
561 561 try:
562 562 if os.path.exists(self.sjoin("journal")):
563 563 self.ui.status(_("rolling back interrupted transaction\n"))
564 564 transaction.rollback(self.sopener, self.sjoin("journal"))
565 565 self.invalidate()
566 566 return True
567 567 else:
568 568 self.ui.warn(_("no interrupted transaction available\n"))
569 569 return False
570 570 finally:
571 571 del l
572 572
573 573 def rollback(self):
574 574 wlock = lock = None
575 575 try:
576 576 wlock = self.wlock()
577 577 lock = self.lock()
578 578 if os.path.exists(self.sjoin("undo")):
579 579 self.ui.status(_("rolling back last transaction\n"))
580 580 transaction.rollback(self.sopener, self.sjoin("undo"))
581 581 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
582 582 branch = self.opener("undo.branch").read()
583 583 self.dirstate.setbranch(branch)
584 584 self.invalidate()
585 585 self.dirstate.invalidate()
586 586 else:
587 587 self.ui.warn(_("no rollback information available\n"))
588 588 finally:
589 589 del lock, wlock
590 590
591 591 def invalidate(self):
592 592 for a in "changelog manifest".split():
593 593 if hasattr(self, a):
594 594 self.__delattr__(a)
595 595 self.tagscache = None
596 596 self._tagstypecache = None
597 597 self.nodetagscache = None
598 598
599 599 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
600 600 try:
601 601 l = lock.lock(lockname, 0, releasefn, desc=desc)
602 602 except lock.LockHeld, inst:
603 603 if not wait:
604 604 raise
605 605 self.ui.warn(_("waiting for lock on %s held by %r\n") %
606 606 (desc, inst.locker))
607 607 # default to 600 seconds timeout
608 608 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
609 609 releasefn, desc=desc)
610 610 if acquirefn:
611 611 acquirefn()
612 612 return l
613 613
614 614 def lock(self, wait=True):
615 615 if self._lockref and self._lockref():
616 616 return self._lockref()
617 617
618 618 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
619 619 _('repository %s') % self.origroot)
620 620 self._lockref = weakref.ref(l)
621 621 return l
622 622
623 623 def wlock(self, wait=True):
624 624 if self._wlockref and self._wlockref():
625 625 return self._wlockref()
626 626
627 627 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
628 628 self.dirstate.invalidate, _('working directory of %s') %
629 629 self.origroot)
630 630 self._wlockref = weakref.ref(l)
631 631 return l
632 632
633 633 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
634 634 """
635 635 commit an individual file as part of a larger transaction
636 636 """
637 637
638 638 t = self.wread(fn)
639 639 fl = self.file(fn)
640 640 fp1 = manifest1.get(fn, nullid)
641 641 fp2 = manifest2.get(fn, nullid)
642 642
643 643 meta = {}
644 644 cp = self.dirstate.copied(fn)
645 645 if cp:
646 646 # Mark the new revision of this file as a copy of another
647 647 # file. This copy data will effectively act as a parent
648 648 # of this new revision. If this is a merge, the first
649 649 # parent will be the nullid (meaning "look up the copy data")
650 650 # and the second one will be the other parent. For example:
651 651 #
652 652 # 0 --- 1 --- 3 rev1 changes file foo
653 653 # \ / rev2 renames foo to bar and changes it
654 654 # \- 2 -/ rev3 should have bar with all changes and
655 655 # should record that bar descends from
656 656 # bar in rev2 and foo in rev1
657 657 #
658 658 # this allows this merge to succeed:
659 659 #
660 660 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
661 661 # \ / merging rev3 and rev4 should use bar@rev2
662 662 # \- 2 --- 4 as the merge base
663 663 #
664 664 meta["copy"] = cp
665 665 if not manifest2: # not a branch merge
666 666 meta["copyrev"] = hex(manifest1.get(cp, nullid))
667 667 fp2 = nullid
668 668 elif fp2 != nullid: # copied on remote side
669 669 meta["copyrev"] = hex(manifest1.get(cp, nullid))
670 670 elif fp1 != nullid: # copied on local side, reversed
671 671 meta["copyrev"] = hex(manifest2.get(cp))
672 672 fp2 = fp1
673 673 elif cp in manifest2: # directory rename on local side
674 674 meta["copyrev"] = hex(manifest2[cp])
675 675 else: # directory rename on remote side
676 676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
677 677 self.ui.debug(_(" %s: copy %s:%s\n") %
678 678 (fn, cp, meta["copyrev"]))
679 679 fp1 = nullid
680 680 elif fp2 != nullid:
681 681 # is one parent an ancestor of the other?
682 682 fpa = fl.ancestor(fp1, fp2)
683 683 if fpa == fp1:
684 684 fp1, fp2 = fp2, nullid
685 685 elif fpa == fp2:
686 686 fp2 = nullid
687 687
688 688 # is the file unmodified from the parent? report existing entry
689 689 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
690 690 return fp1
691 691
692 692 changelist.append(fn)
693 693 return fl.add(t, meta, tr, linkrev, fp1, fp2)
694 694
695 695 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
696 696 if p1 is None:
697 697 p1, p2 = self.dirstate.parents()
698 698 return self.commit(files=files, text=text, user=user, date=date,
699 699 p1=p1, p2=p2, extra=extra, empty_ok=True)
700 700
701 701 def commit(self, files=None, text="", user=None, date=None,
702 702 match=util.always, force=False, force_editor=False,
703 703 p1=None, p2=None, extra={}, empty_ok=False):
704 704 wlock = lock = tr = None
705 705 valid = 0 # don't save the dirstate if this isn't set
706 706 if files:
707 707 files = util.unique(files)
708 708 try:
709 709 commit = []
710 710 remove = []
711 711 changed = []
712 712 use_dirstate = (p1 is None) # not rawcommit
713 713 extra = extra.copy()
714 714
715 715 if use_dirstate:
716 716 if files:
717 717 for f in files:
718 718 s = self.dirstate[f]
719 719 if s in 'nma':
720 720 commit.append(f)
721 721 elif s == 'r':
722 722 remove.append(f)
723 723 else:
724 724 self.ui.warn(_("%s not tracked!\n") % f)
725 725 else:
726 726 changes = self.status(match=match)[:5]
727 727 modified, added, removed, deleted, unknown = changes
728 728 commit = modified + added
729 729 remove = removed
730 730 else:
731 731 commit = files
732 732
733 733 if use_dirstate:
734 734 p1, p2 = self.dirstate.parents()
735 735 update_dirstate = True
736 736 else:
737 737 p1, p2 = p1, p2 or nullid
738 738 update_dirstate = (self.dirstate.parents()[0] == p1)
739 739
740 740 c1 = self.changelog.read(p1)
741 741 c2 = self.changelog.read(p2)
742 742 m1 = self.manifest.read(c1[0]).copy()
743 743 m2 = self.manifest.read(c2[0])
744 744
745 745 if use_dirstate:
746 746 branchname = self.workingctx().branch()
747 747 try:
748 748 branchname = branchname.decode('UTF-8').encode('UTF-8')
749 749 except UnicodeDecodeError:
750 750 raise util.Abort(_('branch name not in UTF-8!'))
751 751 else:
752 752 branchname = ""
753 753
754 754 if use_dirstate:
755 755 oldname = c1[5].get("branch") # stored in UTF-8
756 756 if (not commit and not remove and not force and p2 == nullid
757 757 and branchname == oldname):
758 758 self.ui.status(_("nothing changed\n"))
759 759 return None
760 760
761 761 xp1 = hex(p1)
762 762 if p2 == nullid: xp2 = ''
763 763 else: xp2 = hex(p2)
764 764
765 765 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
766 766
767 767 wlock = self.wlock()
768 768 lock = self.lock()
769 769 tr = self.transaction()
770 770 trp = weakref.proxy(tr)
771 771
772 772 # check in files
773 773 new = {}
774 774 linkrev = self.changelog.count()
775 775 commit.sort()
776 776 is_exec = util.execfunc(self.root, m1.execf)
777 777 is_link = util.linkfunc(self.root, m1.linkf)
778 778 for f in commit:
779 779 self.ui.note(f + "\n")
780 780 try:
781 781 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
782 782 new_exec = is_exec(f)
783 783 new_link = is_link(f)
784 784 if ((not changed or changed[-1] != f) and
785 785 m2.get(f) != new[f]):
786 786 # mention the file in the changelog if some
787 787 # flag changed, even if there was no content
788 788 # change.
789 789 old_exec = m1.execf(f)
790 790 old_link = m1.linkf(f)
791 791 if old_exec != new_exec or old_link != new_link:
792 792 changed.append(f)
793 793 m1.set(f, new_exec, new_link)
794 794 if use_dirstate:
795 795 self.dirstate.normal(f)
796 796
797 797 except (OSError, IOError):
798 798 if use_dirstate:
799 799 self.ui.warn(_("trouble committing %s!\n") % f)
800 800 raise
801 801 else:
802 802 remove.append(f)
803 803
804 804 # update manifest
805 805 m1.update(new)
806 806 remove.sort()
807 807 removed = []
808 808
809 809 for f in remove:
810 810 if f in m1:
811 811 del m1[f]
812 812 removed.append(f)
813 813 elif f in m2:
814 814 removed.append(f)
815 815 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
816 816 (new, removed))
817 817
818 818 # add changeset
819 819 new = new.keys()
820 820 new.sort()
821 821
822 822 user = user or self.ui.username()
823 823 if (not empty_ok and not text) or force_editor:
824 824 edittext = []
825 825 if text:
826 826 edittext.append(text)
827 827 edittext.append("")
828 828 edittext.append(_("HG: Enter commit message."
829 829 " Lines beginning with 'HG:' are removed."))
830 830 edittext.append("HG: --")
831 831 edittext.append("HG: user: %s" % user)
832 832 if p2 != nullid:
833 833 edittext.append("HG: branch merge")
834 834 if branchname:
835 835 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
836 836 edittext.extend(["HG: changed %s" % f for f in changed])
837 837 edittext.extend(["HG: removed %s" % f for f in removed])
838 838 if not changed and not remove:
839 839 edittext.append("HG: no files changed")
840 840 edittext.append("")
841 841 # run editor in the repository root
842 842 olddir = os.getcwd()
843 843 os.chdir(self.root)
844 844 text = self.ui.edit("\n".join(edittext), user)
845 845 os.chdir(olddir)
846 846
847 847 if branchname:
848 848 extra["branch"] = branchname
849 849
850 850 if use_dirstate:
851 851 lines = [line.rstrip() for line in text.rstrip().splitlines()]
852 852 while lines and not lines[0]:
853 853 del lines[0]
854 854 if not lines:
855 855 raise util.Abort(_("empty commit message"))
856 856 text = '\n'.join(lines)
857 857
858 858 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
859 859 user, date, extra)
860 860 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
861 861 parent2=xp2)
862 862 tr.close()
863 863
864 864 if self.branchcache and "branch" in extra:
865 865 self.branchcache[util.tolocal(extra["branch"])] = n
866 866
867 867 if use_dirstate or update_dirstate:
868 868 self.dirstate.setparents(n)
869 869 if use_dirstate:
870 870 for f in removed:
871 871 self.dirstate.forget(f)
872 872 valid = 1 # our dirstate updates are complete
873 873
874 874 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
875 875 return n
876 876 finally:
877 877 if not valid: # don't save our updated dirstate
878 878 self.dirstate.invalidate()
879 879 del tr, lock, wlock
880 880
881 881 def walk(self, node=None, files=[], match=util.always, badmatch=None):
882 882 '''
883 883 walk recursively through the directory tree or a given
884 884 changeset, finding all files matched by the match
885 885 function
886 886
887 887 results are yielded in a tuple (src, filename), where src
888 888 is one of:
889 889 'f' the file was found in the directory tree
890 890 'm' the file was only in the dirstate and not in the tree
891 891 'b' file was not found and matched badmatch
892 892 '''
893 893
894 894 if node:
895 895 fdict = dict.fromkeys(files)
896 896 # for dirstate.walk, files=['.'] means "walk the whole tree".
897 897 # follow that here, too
898 898 fdict.pop('.', None)
899 899 mdict = self.manifest.read(self.changelog.read(node)[0])
900 900 mfiles = mdict.keys()
901 901 mfiles.sort()
902 902 for fn in mfiles:
903 903 for ffn in fdict:
904 904 # match if the file is the exact name or a directory
905 905 if ffn == fn or fn.startswith("%s/" % ffn):
906 906 del fdict[ffn]
907 907 break
908 908 if match(fn):
909 909 yield 'm', fn
910 910 ffiles = fdict.keys()
911 911 ffiles.sort()
912 912 for fn in ffiles:
913 913 if badmatch and badmatch(fn):
914 914 if match(fn):
915 915 yield 'b', fn
916 916 else:
917 917 self.ui.warn(_('%s: No such file in rev %s\n')
918 918 % (self.pathto(fn), short(node)))
919 919 else:
920 920 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
921 921 yield src, fn
922 922
923 923 def status(self, node1=None, node2=None, files=[], match=util.always,
924 924 list_ignored=False, list_clean=False):
925 925 """return status of files between two nodes or node and working directory
926 926
927 927 If node1 is None, use the first dirstate parent instead.
928 928 If node2 is None, compare node1 with working directory.
929 929 """
930 930
931 931 def fcmp(fn, getnode):
932 932 t1 = self.wread(fn)
933 933 return self.file(fn).cmp(getnode(fn), t1)
934 934
935 935 def mfmatches(node):
936 936 change = self.changelog.read(node)
937 937 mf = self.manifest.read(change[0]).copy()
938 938 for fn in mf.keys():
939 939 if not match(fn):
940 940 del mf[fn]
941 941 return mf
942 942
943 943 modified, added, removed, deleted, unknown = [], [], [], [], []
944 944 ignored, clean = [], []
945 945
946 946 compareworking = False
947 947 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
948 948 compareworking = True
949 949
950 950 if not compareworking:
951 951 # read the manifest from node1 before the manifest from node2,
952 952 # so that we'll hit the manifest cache if we're going through
953 953 # all the revisions in parent->child order.
954 954 mf1 = mfmatches(node1)
955 955
956 956 # are we comparing the working directory?
957 957 if not node2:
958 958 (lookup, modified, added, removed, deleted, unknown,
959 959 ignored, clean) = self.dirstate.status(files, match,
960 960 list_ignored, list_clean)
961 961
962 962 # are we comparing working dir against its parent?
963 963 if compareworking:
964 964 if lookup:
965 965 fixup = []
966 966 # do a full compare of any files that might have changed
967 967 ctx = self.changectx()
968 968 for f in lookup:
969 969 if f not in ctx or ctx[f].cmp(self.wread(f)):
970 970 modified.append(f)
971 971 else:
972 972 fixup.append(f)
973 973 if list_clean:
974 974 clean.append(f)
975 975
976 976 # update dirstate for files that are actually clean
977 977 if fixup:
978 978 wlock = None
979 979 try:
980 980 try:
981 981 wlock = self.wlock(False)
982 982 except lock.LockException:
983 983 pass
984 984 if wlock:
985 985 for f in fixup:
986 986 self.dirstate.normal(f)
987 987 finally:
988 988 del wlock
989 989 else:
990 990 # we are comparing working dir against non-parent
991 991 # generate a pseudo-manifest for the working dir
992 992 # XXX: create it in dirstate.py ?
993 993 mf2 = mfmatches(self.dirstate.parents()[0])
994 994 is_exec = util.execfunc(self.root, mf2.execf)
995 995 is_link = util.linkfunc(self.root, mf2.linkf)
996 996 for f in lookup + modified + added:
997 997 mf2[f] = ""
998 998 mf2.set(f, is_exec(f), is_link(f))
999 999 for f in removed:
1000 1000 if f in mf2:
1001 1001 del mf2[f]
1002 1002
1003 1003 else:
1004 1004 # we are comparing two revisions
1005 1005 mf2 = mfmatches(node2)
1006 1006
1007 1007 if not compareworking:
1008 1008 # flush lists from dirstate before comparing manifests
1009 1009 modified, added, clean = [], [], []
1010 1010
1011 1011 # make sure to sort the files so we talk to the disk in a
1012 1012 # reasonable order
1013 1013 mf2keys = mf2.keys()
1014 1014 mf2keys.sort()
1015 1015 getnode = lambda fn: mf1.get(fn, nullid)
1016 1016 for fn in mf2keys:
1017 1017 if fn in mf1:
1018 1018 if (mf1.flags(fn) != mf2.flags(fn) or
1019 1019 (mf1[fn] != mf2[fn] and
1020 1020 (mf2[fn] != "" or fcmp(fn, getnode)))):
1021 1021 modified.append(fn)
1022 1022 elif list_clean:
1023 1023 clean.append(fn)
1024 1024 del mf1[fn]
1025 1025 else:
1026 1026 added.append(fn)
1027 1027
1028 1028 removed = mf1.keys()
1029 1029
1030 1030 # sort and return results:
1031 1031 for l in modified, added, removed, deleted, unknown, ignored, clean:
1032 1032 l.sort()
1033 1033 return (modified, added, removed, deleted, unknown, ignored, clean)
1034 1034
1035 1035 def add(self, list):
1036 1036 wlock = self.wlock()
1037 1037 try:
1038 1038 rejected = []
1039 1039 for f in list:
1040 1040 p = self.wjoin(f)
1041 1041 try:
1042 1042 st = os.lstat(p)
1043 1043 except:
1044 1044 self.ui.warn(_("%s does not exist!\n") % f)
1045 1045 rejected.append(f)
1046 1046 continue
1047 1047 if st.st_size > 10000000:
1048 1048 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 1049 " performance problems\n"
1050 1050 "(use 'hg revert %s' to unadd the file)\n")
1051 1051 % (f, f))
1052 1052 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 1053 self.ui.warn(_("%s not added: only files and symlinks "
1054 1054 "supported currently\n") % f)
1055 1055 rejected.append(p)
1056 1056 elif self.dirstate[f] in 'amn':
1057 1057 self.ui.warn(_("%s already tracked!\n") % f)
1058 1058 elif self.dirstate[f] == 'r':
1059 1059 self.dirstate.normallookup(f)
1060 1060 else:
1061 1061 self.dirstate.add(f)
1062 1062 return rejected
1063 1063 finally:
1064 1064 del wlock
1065 1065
1066 1066 def forget(self, list):
1067 1067 wlock = self.wlock()
1068 1068 try:
1069 1069 for f in list:
1070 1070 if self.dirstate[f] != 'a':
1071 1071 self.ui.warn(_("%s not added!\n") % f)
1072 1072 else:
1073 1073 self.dirstate.forget(f)
1074 1074 finally:
1075 1075 del wlock
1076 1076
1077 1077 def remove(self, list, unlink=False):
1078 1078 wlock = None
1079 1079 try:
1080 1080 if unlink:
1081 1081 for f in list:
1082 1082 try:
1083 1083 util.unlink(self.wjoin(f))
1084 1084 except OSError, inst:
1085 1085 if inst.errno != errno.ENOENT:
1086 1086 raise
1087 1087 wlock = self.wlock()
1088 1088 for f in list:
1089 1089 if unlink and os.path.exists(self.wjoin(f)):
1090 1090 self.ui.warn(_("%s still exists!\n") % f)
1091 1091 elif self.dirstate[f] == 'a':
1092 1092 self.dirstate.forget(f)
1093 1093 elif f not in self.dirstate:
1094 1094 self.ui.warn(_("%s not tracked!\n") % f)
1095 1095 else:
1096 1096 self.dirstate.remove(f)
1097 1097 finally:
1098 1098 del wlock
1099 1099
1100 1100 def undelete(self, list):
1101 1101 wlock = None
1102 1102 try:
1103 1103 manifests = [self.manifest.read(self.changelog.read(p)[0])
1104 1104 for p in self.dirstate.parents() if p != nullid]
1105 1105 wlock = self.wlock()
1106 1106 for f in list:
1107 1107 if self.dirstate[f] != 'r':
1108 1108 self.ui.warn("%s not removed!\n" % f)
1109 1109 else:
1110 1110 m = f in manifests[0] and manifests[0] or manifests[1]
1111 1111 t = self.file(f).read(m[f])
1112 1112 self.wwrite(f, t, m.flags(f))
1113 1113 self.dirstate.normal(f)
1114 1114 finally:
1115 1115 del wlock
1116 1116
1117 1117 def copy(self, source, dest):
1118 1118 wlock = None
1119 1119 try:
1120 1120 p = self.wjoin(dest)
1121 1121 if not (os.path.exists(p) or os.path.islink(p)):
1122 1122 self.ui.warn(_("%s does not exist!\n") % dest)
1123 1123 elif not (os.path.isfile(p) or os.path.islink(p)):
1124 1124 self.ui.warn(_("copy failed: %s is not a file or a "
1125 1125 "symbolic link\n") % dest)
1126 1126 else:
1127 1127 wlock = self.wlock()
1128 1128 if dest not in self.dirstate:
1129 1129 self.dirstate.add(dest)
1130 1130 self.dirstate.copy(source, dest)
1131 1131 finally:
1132 1132 del wlock
1133 1133
1134 1134 def heads(self, start=None):
1135 1135 heads = self.changelog.heads(start)
1136 1136 # sort the output in rev descending order
1137 1137 heads = [(-self.changelog.rev(h), h) for h in heads]
1138 1138 heads.sort()
1139 1139 return [n for (r, n) in heads]
1140 1140
1141 1141 def branchheads(self, branch, start=None):
1142 1142 branches = self.branchtags()
1143 1143 if branch not in branches:
1144 1144 return []
1145 1145 # The basic algorithm is this:
1146 1146 #
1147 1147 # Start from the branch tip since there are no later revisions that can
1148 1148 # possibly be in this branch, and the tip is a guaranteed head.
1149 1149 #
1150 1150 # Remember the tip's parents as the first ancestors, since these by
1151 1151 # definition are not heads.
1152 1152 #
1153 1153 # Step backwards from the branch tip through all the revisions. We are
1154 1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1155 1155 # nodes in reverse topological order (children before parents).
1156 1156 #
1157 1157 # If a revision is one of the ancestors of a head then we can toss it
1158 1158 # out of the ancestors set (we've already found it and won't be
1159 1159 # visiting it again) and put its parents in the ancestors set.
1160 1160 #
1161 1161 # Otherwise, if a revision is in the branch it's another head, since it
1162 1162 # wasn't in the ancestor list of an existing head. So add it to the
1163 1163 # head list, and add its parents to the ancestor list.
1164 1164 #
1165 1165 # If it is not in the branch ignore it.
1166 1166 #
1167 1167 # Once we have a list of heads, use nodesbetween to filter out all the
1168 1168 # heads that cannot be reached from startrev. There may be a more
1169 1169 # efficient way to do this as part of the previous algorithm.
1170 1170
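        # Editorial worked example (not part of the original file): in a
        # repository with revisions 0..4 where branches[branch] is rev 4,
        # we start with heads = [4] and ancestors = parentrevs(4).
        # Scanning 3, 2, 1, 0 in reverse order, a revision found in
        # ancestors is replaced by its own parents; a branch revision not
        # found there (say rev 3, a second head of the branch) is appended
        # to heads and its parents join the ancestor set.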
1171 1171 set = util.set
1172 1172 heads = [self.changelog.rev(branches[branch])]
1173 1173 # Don't care if ancestors contains nullrev or not.
1174 1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1175 1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1176 1176 if rev in ancestors:
1177 1177 ancestors.update(self.changelog.parentrevs(rev))
1178 1178 ancestors.remove(rev)
1179 1179 elif self.changectx(rev).branch() == branch:
1180 1180 heads.append(rev)
1181 1181 ancestors.update(self.changelog.parentrevs(rev))
1182 1182 heads = [self.changelog.node(rev) for rev in heads]
1183 1183 if start is not None:
1184 1184 heads = self.changelog.nodesbetween([start], heads)[2]
1185 1185 return heads
1186 1186
1187 1187 def branches(self, nodes):
1188 1188 if not nodes:
1189 1189 nodes = [self.changelog.tip()]
1190 1190 b = []
1191 1191 for n in nodes:
1192 1192 t = n
1193 1193 while 1:
1194 1194 p = self.changelog.parents(n)
1195 1195 if p[1] != nullid or p[0] == nullid:
1196 1196 b.append((t, n, p[0], p[1]))
1197 1197 break
1198 1198 n = p[0]
1199 1199 return b
1200 1200
1201 1201 def between(self, pairs):
1202 1202 r = []
1203 1203
1204 1204 for top, bottom in pairs:
1205 1205 n, l, i = top, [], 0
1206 1206 f = 1
1207 1207
1208 1208 while n != bottom:
1209 1209 p = self.changelog.parents(n)[0]
1210 1210 if i == f:
1211 1211 l.append(n)
1212 1212 f = f * 2
1213 1213 n = p
1214 1214 i += 1
1215 1215
1216 1216 r.append(l)
1217 1217
1218 1218 return r
1219 1219
1220 1220 def findincoming(self, remote, base=None, heads=None, force=False):
1221 1221 """Return list of roots of the subsets of missing nodes from remote
1222 1222
1223 1223 If base dict is specified, assume that these nodes and their parents
1224 1224 exist on the remote side and that no child of a node of base exists
1225 1225 in both remote and self.
1226 1226 Furthermore base will be updated to include the nodes that exist
1227 1227 in self and remote but whose children do not exist in self and remote.
1228 1228 If a list of heads is specified, return only nodes which are heads
1229 1229 or ancestors of these heads.
1230 1230
1231 1231 All the ancestors of base are in self and in remote.
1232 1232 All the descendants of the list returned are missing in self.
1233 1233 (and so we know that the rest of the nodes are missing in remote, see
1234 1234 outgoing)
1235 1235 """
1236 1236 m = self.changelog.nodemap
1237 1237 search = []
1238 1238 fetch = {}
1239 1239 seen = {}
1240 1240 seenbranch = {}
1241 1241 if base == None:
1242 1242 base = {}
1243 1243
1244 1244 if not heads:
1245 1245 heads = remote.heads()
1246 1246
1247 1247 if self.changelog.tip() == nullid:
1248 1248 base[nullid] = 1
1249 1249 if heads != [nullid]:
1250 1250 return [nullid]
1251 1251 return []
1252 1252
1253 1253 # assume we're closer to the tip than the root
1254 1254 # and start by examining the heads
1255 1255 self.ui.status(_("searching for changes\n"))
1256 1256
1257 1257 unknown = []
1258 1258 for h in heads:
1259 1259 if h not in m:
1260 1260 unknown.append(h)
1261 1261 else:
1262 1262 base[h] = 1
1263 1263
1264 1264 if not unknown:
1265 1265 return []
1266 1266
1267 1267 req = dict.fromkeys(unknown)
1268 1268 reqcnt = 0
1269 1269
1270 1270 # search through remote branches
1271 1271 # a 'branch' here is a linear segment of history, with four parts:
1272 1272 # head, root, first parent, second parent
1273 1273 # (a branch always has two parents (or none) by definition)
1274 1274 unknown = remote.branches(unknown)
1275 1275 while unknown:
1276 1276 r = []
1277 1277 while unknown:
1278 1278 n = unknown.pop(0)
1279 1279 if n[0] in seen:
1280 1280 continue
1281 1281
1282 1282 self.ui.debug(_("examining %s:%s\n")
1283 1283 % (short(n[0]), short(n[1])))
1284 1284 if n[0] == nullid: # found the end of the branch
1285 1285 pass
1286 1286 elif n in seenbranch:
1287 1287 self.ui.debug(_("branch already found\n"))
1288 1288 continue
1289 1289 elif n[1] and n[1] in m: # do we know the base?
1290 1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1291 1291 % (short(n[0]), short(n[1])))
1292 1292 search.append(n) # schedule branch range for scanning
1293 1293 seenbranch[n] = 1
1294 1294 else:
1295 1295 if n[1] not in seen and n[1] not in fetch:
1296 1296 if n[2] in m and n[3] in m:
1297 1297 self.ui.debug(_("found new changeset %s\n") %
1298 1298 short(n[1]))
1299 1299 fetch[n[1]] = 1 # earliest unknown
1300 1300 for p in n[2:4]:
1301 1301 if p in m:
1302 1302 base[p] = 1 # latest known
1303 1303
1304 1304 for p in n[2:4]:
1305 1305 if p not in req and p not in m:
1306 1306 r.append(p)
1307 1307 req[p] = 1
1308 1308 seen[n[0]] = 1
1309 1309
1310 1310 if r:
1311 1311 reqcnt += 1
1312 1312 self.ui.debug(_("request %d: %s\n") %
1313 1313 (reqcnt, " ".join(map(short, r))))
1314 1314 for p in xrange(0, len(r), 10):
1315 1315 for b in remote.branches(r[p:p+10]):
1316 1316 self.ui.debug(_("received %s:%s\n") %
1317 1317 (short(b[0]), short(b[1])))
1318 1318 unknown.append(b)
1319 1319
1320 1320 # do binary search on the branches we found
1321 1321 while search:
1322 1322 n = search.pop(0)
1323 1323 reqcnt += 1
1324 1324 l = remote.between([(n[0], n[1])])[0]
1325 1325 l.append(n[1])
1326 1326 p = n[0]
1327 1327 f = 1
1328 1328 for i in l:
1329 1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1330 1330 if i in m:
1331 1331 if f <= 2:
1332 1332 self.ui.debug(_("found new branch changeset %s\n") %
1333 1333 short(p))
1334 1334 fetch[p] = 1
1335 1335 base[i] = 1
1336 1336 else:
1337 1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1338 1338 % (short(p), short(i)))
1339 1339 search.append((p, i))
1340 1340 break
1341 1341 p, f = i, f * 2
1342 1342
1343 1343 # sanity check our fetch list
1344 1344 for f in fetch.keys():
1345 1345 if f in m:
1346 1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1347 1347
1348 1348 if base.keys() == [nullid]:
1349 1349 if force:
1350 1350 self.ui.warn(_("warning: repository is unrelated\n"))
1351 1351 else:
1352 1352 raise util.Abort(_("repository is unrelated"))
1353 1353
1354 1354 self.ui.debug(_("found new changesets starting at ") +
1355 1355 " ".join([short(f) for f in fetch]) + "\n")
1356 1356
1357 1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1358 1358
1359 1359 return fetch.keys()
1360 1360
1361 1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1362 1362 """Return list of nodes that are roots of subsets not in remote
1363 1363
1364 1364 If base dict is specified, assume that these nodes and their parents
1365 1365 exist on the remote side.
1366 1366 If a list of heads is specified, return only nodes which are heads
1367 1367 or ancestors of these heads, and return a second element which
1368 1368 contains all remote heads which get new children.
1369 1369 """
1370 1370 if base == None:
1371 1371 base = {}
1372 1372 self.findincoming(remote, base, heads, force=force)
1373 1373
1374 1374 self.ui.debug(_("common changesets up to ")
1375 1375 + " ".join(map(short, base.keys())) + "\n")
1376 1376
1377 1377 remain = dict.fromkeys(self.changelog.nodemap)
1378 1378
1379 1379 # prune everything remote has from the tree
1380 1380 del remain[nullid]
1381 1381 remove = base.keys()
1382 1382 while remove:
1383 1383 n = remove.pop(0)
1384 1384 if n in remain:
1385 1385 del remain[n]
1386 1386 for p in self.changelog.parents(n):
1387 1387 remove.append(p)
1388 1388
1389 1389 # find every node whose parents have been pruned
1390 1390 subset = []
1391 1391 # find every remote head that will get new children
1392 1392 updated_heads = {}
1393 1393 for n in remain:
1394 1394 p1, p2 = self.changelog.parents(n)
1395 1395 if p1 not in remain and p2 not in remain:
1396 1396 subset.append(n)
1397 1397 if heads:
1398 1398 if p1 in heads:
1399 1399 updated_heads[p1] = True
1400 1400 if p2 in heads:
1401 1401 updated_heads[p2] = True
1402 1402
1403 1403 # this is the set of all roots we have to push
1404 1404 if heads:
1405 1405 return subset, updated_heads.keys()
1406 1406 else:
1407 1407 return subset
1408 1408
1409 1409 def pull(self, remote, heads=None, force=False):
1410 1410 lock = self.lock()
1411 1411 try:
1412 1412 fetch = self.findincoming(remote, heads=heads, force=force)
1413 1413 if fetch == [nullid]:
1414 1414 self.ui.status(_("requesting all changes\n"))
1415 1415
1416 1416 if not fetch:
1417 1417 self.ui.status(_("no changes found\n"))
1418 1418 return 0
1419 1419
1420 1420 if heads is None:
1421 1421 cg = remote.changegroup(fetch, 'pull')
1422 1422 else:
1423 1423 if 'changegroupsubset' not in remote.capabilities:
1424 1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1425 1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1426 1426 return self.addchangegroup(cg, 'pull', remote.url())
1427 1427 finally:
1428 1428 del lock
1429 1429
1430 1430 def push(self, remote, force=False, revs=None):
1431 1431 # there are two ways to push to remote repo:
1432 1432 #
1433 1433 # addchangegroup assumes local user can lock remote
1434 1434 # repo (local filesystem, old ssh servers).
1435 1435 #
1436 1436 # unbundle assumes local user cannot lock remote repo (new ssh
1437 1437 # servers, http servers).
1438 1438
1439 1439 if remote.capable('unbundle'):
1440 1440 return self.push_unbundle(remote, force, revs)
1441 1441 return self.push_addchangegroup(remote, force, revs)
1442 1442
1443 1443 def prepush(self, remote, force, revs):
1444 1444 base = {}
1445 1445 remote_heads = remote.heads()
1446 1446 inc = self.findincoming(remote, base, remote_heads, force=force)
1447 1447
1448 1448 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1449 1449 if revs is not None:
1450 1450 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1451 1451 else:
1452 1452 bases, heads = update, self.changelog.heads()
1453 1453
1454 1454 if not bases:
1455 1455 self.ui.status(_("no changes found\n"))
1456 1456 return None, 1
1457 1457 elif not force:
1458 1458 # check if we're creating new remote heads
1459 1459 # to be a remote head after push, node must be either
1460 1460 # - unknown locally
1461 1461 # - a local outgoing head descended from update
1462 1462 # - a remote head that's known locally and not
1463 1463 # ancestral to an outgoing head
1464 1464
1465 1465 warn = 0
1466 1466
1467 1467 if remote_heads == [nullid]:
1468 1468 warn = 0
1469 1469 elif not revs and len(heads) > len(remote_heads):
1470 1470 warn = 1
1471 1471 else:
1472 1472 newheads = list(heads)
1473 1473 for r in remote_heads:
1474 1474 if r in self.changelog.nodemap:
1475 1475 desc = self.changelog.heads(r, heads)
1476 1476 l = [h for h in heads if h in desc]
1477 1477 if not l:
1478 1478 newheads.append(r)
1479 1479 else:
1480 1480 newheads.append(r)
1481 1481 if len(newheads) > len(remote_heads):
1482 1482 warn = 1
1483 1483
1484 1484 if warn:
1485 1485 self.ui.warn(_("abort: push creates new remote branches!\n"))
1486 1486 self.ui.status(_("(did you forget to merge?"
1487 1487 " use push -f to force)\n"))
1488 1488 return None, 1
1489 1489 elif inc:
1490 1490 self.ui.warn(_("note: unsynced remote changes!\n"))
1491 1491
1492 1492
1493 1493 if revs is None:
1494 1494 cg = self.changegroup(update, 'push')
1495 1495 else:
1496 1496 cg = self.changegroupsubset(update, revs, 'push')
1497 1497 return cg, remote_heads
1498 1498
1499 1499 def push_addchangegroup(self, remote, force, revs):
1500 1500 lock = remote.lock()
1501 1501 try:
1502 1502 ret = self.prepush(remote, force, revs)
1503 1503 if ret[0] is not None:
1504 1504 cg, remote_heads = ret
1505 1505 return remote.addchangegroup(cg, 'push', self.url())
1506 1506 return ret[1]
1507 1507 finally:
1508 1508 del lock
1509 1509
1510 1510 def push_unbundle(self, remote, force, revs):
1511 1511 # local repo finds heads on server, finds out what revs it
1512 1512 # must push. once revs transferred, if server finds it has
1513 1513 # different heads (someone else won commit/push race), server
1514 1514 # aborts.
1515 1515
1516 1516 ret = self.prepush(remote, force, revs)
1517 1517 if ret[0] is not None:
1518 1518 cg, remote_heads = ret
1519 1519 if force: remote_heads = ['force']
1520 1520 return remote.unbundle(cg, remote_heads, 'push')
1521 1521 return ret[1]
1522 1522
1523 1523 def changegroupinfo(self, nodes, source):
1524 1524 if self.ui.verbose or source == 'bundle':
1525 1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1526 1526 if self.ui.debugflag:
1527 1527 self.ui.debug(_("List of changesets:\n"))
1528 1528 for node in nodes:
1529 1529 self.ui.debug("%s\n" % hex(node))
1530 1530
1531 1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1532 1532 """This function generates a changegroup consisting of all the nodes
1533 1533 that are descendants of any of the bases, and ancestors of any of
1534 1534 the heads.
1535 1535
1536 1536 It is fairly complex as determining which filenodes and which
1537 1537 manifest nodes need to be included for the changeset to be complete
1538 1538 is non-trivial.
1539 1539
1540 1540 Another wrinkle is doing the reverse, figuring out which changeset in
1541 1541 the changegroup a particular filenode or manifestnode belongs to.
1542 1542
1543 1543 The caller can specify some nodes that must be included in the
1544 1544 changegroup using the extranodes argument. It should be a dict
1545 1545 where the keys are the filenames (or 1 for the manifest), and the
1546 1546 values are lists of (node, linknode) tuples, where node is a wanted
1547 1547 node and linknode is the changelog node that should be transmitted as
1548 1548 the linkrev.
1549 1549 """
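        # Illustrative extranodes value (editorial; hypothetical node
        # names, not part of the original file). Per the docstring above,
        # filename keys map to (node, linknode) pairs and the key 1
        # addresses the manifest:
        #
        #     extranodes = {
        #         'foo/bar.txt': [(fnode, clnode)],
        #         1:             [(mnode, clnode)],
        #     }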
1550 1550
1551 1551 self.hook('preoutgoing', throw=True, source=source)
1552 1552
1553 1553 # Set up some initial variables
1554 1554 # Make it easy to refer to self.changelog
1555 1555 cl = self.changelog
1556 1556 # msng is short for missing - compute the list of changesets in this
1557 1557 # changegroup.
1558 1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1559 1559 self.changegroupinfo(msng_cl_lst, source)
1560 1560 # Some bases may turn out to be superfluous, and some heads may be
1561 1561 # too. nodesbetween will return the minimal set of bases and heads
1562 1562 # necessary to re-create the changegroup.
1563 1563
1564 1564 # Known heads are the list of heads that it is assumed the recipient
1565 1565 # of this changegroup will know about.
1566 1566 knownheads = {}
1567 1567 # We assume that all parents of bases are known heads.
1568 1568 for n in bases:
1569 1569 for p in cl.parents(n):
1570 1570 if p != nullid:
1571 1571 knownheads[p] = 1
1572 1572 knownheads = knownheads.keys()
1573 1573 if knownheads:
1574 1574 # Now that we know what heads are known, we can compute which
1575 1575 # changesets are known. The recipient must know about all
1576 1576 # changesets required to reach the known heads from the null
1577 1577 # changeset.
1578 1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1579 1579 junk = None
1580 1580 # Transform the list into an ersatz set.
1581 1581 has_cl_set = dict.fromkeys(has_cl_set)
1582 1582 else:
1583 1583 # If there were no known heads, the recipient cannot be assumed to
1584 1584 # know about any changesets.
1585 1585 has_cl_set = {}
1586 1586
1587 1587 # Make it easy to refer to self.manifest
1588 1588 mnfst = self.manifest
1589 1589 # We don't know which manifests are missing yet
1590 1590 msng_mnfst_set = {}
1591 1591 # Nor do we know which filenodes are missing.
1592 1592 msng_filenode_set = {}
1593 1593
1594 1594 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1595 1595 junk = None
1596 1596
1597 1597 # A changeset always belongs to itself, so the changenode lookup
1598 1598 # function for a changenode is identity.
1599 1599 def identity(x):
1600 1600 return x
1601 1601
1602 1602 # A function generating function. Sets up an environment for the
1603 1603 # inner function.
1604 1604 def cmp_by_rev_func(revlog):
1605 1605 # Compare two nodes by their revision number in the environment's
1606 1606 # revision history. Since the revision number both represents the
1607 1607 # most efficient order to read the nodes in, and represents a
1608 1608 # topological sorting of the nodes, this function is often useful.
1609 1609 def cmp_by_rev(a, b):
1610 1610 return cmp(revlog.rev(a), revlog.rev(b))
1611 1611 return cmp_by_rev
1612 1612
1613 1613 # If we determine that a particular file or manifest node must be a
1614 1614 # node that the recipient of the changegroup will already have, we can
1615 1615 # also assume the recipient will have all the parents. This function
1616 1616 # prunes them from the set of missing nodes.
1617 1617 def prune_parents(revlog, hasset, msngset):
1618 1618 haslst = hasset.keys()
1619 1619 haslst.sort(cmp_by_rev_func(revlog))
1620 1620 for node in haslst:
1621 1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1622 1622 while parentlst:
1623 1623 n = parentlst.pop()
1624 1624 if n not in hasset:
1625 1625 hasset[n] = 1
1626 1626 p = [p for p in revlog.parents(n) if p != nullid]
1627 1627 parentlst.extend(p)
1628 1628 for n in hasset:
1629 1629 msngset.pop(n, None)
1630 1630
1631 1631 # This is a function generating function used to set up an environment
1632 1632 # for the inner function to execute in.
1633 1633 def manifest_and_file_collector(changedfileset):
1634 1634 # This is an information gathering function that gathers
1635 1635 # information from each changeset node that goes out as part of
1636 1636 # the changegroup. The information gathered is a list of which
1637 1637 # manifest nodes are potentially required (the recipient may
1638 1638 # already have them) and total list of all files which were
1639 1639 # changed in any changeset in the changegroup.
1640 1640 #
1641 1641 # We also remember the first changenode we saw any manifest
1642 1642 # referenced by so we can later determine which changenode 'owns'
1643 1643 # the manifest.
1644 1644 def collect_manifests_and_files(clnode):
1645 1645 c = cl.read(clnode)
1646 1646 for f in c[3]:
1647 1647 # This is to make sure we only have one instance of each
1648 1648 # filename string for each filename.
1649 1649 changedfileset.setdefault(f, f)
1650 1650 msng_mnfst_set.setdefault(c[0], clnode)
1651 1651 return collect_manifests_and_files
1652 1652
1653 1653 # Figure out which manifest nodes (of the ones we think might be part
1654 1654 # of the changegroup) the recipient must know about and remove them
1655 1655 # from the changegroup.
1656 1656 def prune_manifests():
1657 1657 has_mnfst_set = {}
1658 1658 for n in msng_mnfst_set:
1659 1659 # If a 'missing' manifest thinks it belongs to a changenode
1660 1660 # the recipient is assumed to have, obviously the recipient
1661 1661 # must have that manifest.
1662 1662 linknode = cl.node(mnfst.linkrev(n))
1663 1663 if linknode in has_cl_set:
1664 1664 has_mnfst_set[n] = 1
1665 1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1666 1666
1667 1667 # Use the information collected in collect_manifests_and_files to say
1668 1668 # which changenode any manifestnode belongs to.
1669 1669 def lookup_manifest_link(mnfstnode):
1670 1670 return msng_mnfst_set[mnfstnode]
1671 1671
1672 1672 # A function generating function that sets up the initial environment
1673 1673 # for the inner function.
1674 1674 def filenode_collector(changedfiles):
1675 1675 next_rev = [0]
1676 1676 # This gathers information from each manifestnode included in the
1677 1677 # changegroup about which filenodes the manifest node references
1678 1678 # so we can include those in the changegroup too.
1679 1679 #
1680 1680 # It also remembers which changenode each filenode belongs to. It
1681 1681 # does this by assuming that a filenode belongs to the changenode
1682 1682 # that the first manifest referencing it belongs to.
1683 1683 def collect_msng_filenodes(mnfstnode):
1684 1684 r = mnfst.rev(mnfstnode)
1685 1685 if r == next_rev[0]:
1686 1686 # If the last rev we looked at was the one just previous,
1687 1687 # we only need to see a diff.
1688 1688 deltamf = mnfst.readdelta(mnfstnode)
1689 1689 # For each line in the delta
1690 1690 for f, fnode in deltamf.items():
1691 1691 f = changedfiles.get(f, None)
1692 1692 # And if the file is in the list of files we care
1693 1693 # about.
1694 1694 if f is not None:
1695 1695 # Get the changenode this manifest belongs to
1696 1696 clnode = msng_mnfst_set[mnfstnode]
1697 1697 # Create the set of filenodes for the file if
1698 1698 # there isn't one already.
1699 1699 ndset = msng_filenode_set.setdefault(f, {})
1700 1700 # And set the filenode's changelog node to the
1701 1701 # manifest's if it hasn't been set already.
1702 1702 ndset.setdefault(fnode, clnode)
1703 1703 else:
1704 1704 # Otherwise we need a full manifest.
1705 1705 m = mnfst.read(mnfstnode)
1706 1706 # For every file we care about.
1707 1707 for f in changedfiles:
1708 1708 fnode = m.get(f, None)
1709 1709 # If it's in the manifest
1710 1710 if fnode is not None:
1711 1711 # See comments above.
1712 1712 clnode = msng_mnfst_set[mnfstnode]
1713 1713 ndset = msng_filenode_set.setdefault(f, {})
1714 1714 ndset.setdefault(fnode, clnode)
1715 1715 # Remember the revision we hope to see next.
1716 1716 next_rev[0] = r + 1
1717 1717 return collect_msng_filenodes
1718 1718
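The next_rev trick above is worth isolating: when manifest nodes arrive in consecutive revision order, each one after a gap-free predecessor can be handled from its delta alone. A hedged sketch, with readdelta/readfull as hypothetical stand-ins for the revlog calls:

def walk_manifests(revs, readdelta, readfull):
    next_rev = [0]
    for r in revs:
        if r == next_rev[0]:
            entries = readdelta(r)   # cheap: only the changed entries
        else:
            entries = readfull(r)    # expensive: the whole manifest
        next_rev[0] = r + 1
        yield r, entries

reads = []
walk = walk_manifests([0, 1, 2, 5, 6],
                      lambda r: reads.append('delta'),
                      lambda r: reads.append('full'))
list(walk)   # drive the generator
assert reads == ['delta', 'delta', 'delta', 'full', 'delta']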
1719 1719 # We have a list of filenodes we think we need for a file; let's remove
1720 1720 # all those we know the recipient must have.
1721 1721 def prune_filenodes(f, filerevlog):
1722 1722 msngset = msng_filenode_set[f]
1723 1723 hasset = {}
1724 1724 # If a 'missing' filenode thinks it belongs to a changenode we
1725 1725 # assume the recipient must have, then the recipient must have
1726 1726 # that filenode.
1727 1727 for n in msngset:
1728 1728 clnode = cl.node(filerevlog.linkrev(n))
1729 1729 if clnode in has_cl_set:
1730 1730 hasset[n] = 1
1731 1731 prune_parents(filerevlog, hasset, msngset)
1732 1732
1733 1733 # A function generating function that sets up a context for the
1734 1734 # inner function.
1735 1735 def lookup_filenode_link_func(fname):
1736 1736 msngset = msng_filenode_set[fname]
1737 1737 # Lookup the changenode the filenode belongs to.
1738 1738 def lookup_filenode_link(fnode):
1739 1739 return msngset[fnode]
1740 1740 return lookup_filenode_link
1741 1741
1742 1742 # Add the nodes that were explicitly requested.
1743 1743 def add_extra_nodes(name, nodes):
1744 1744 if not extranodes or name not in extranodes:
1745 1745 return
1746 1746
1747 1747 for node, linknode in extranodes[name]:
1748 1748 if node not in nodes:
1749 1749 nodes[node] = linknode
1750 1750
1751 1751 # Now that we have all these utility functions to help out and
1752 1752 # logically divide up the task, generate the group.
1753 1753 def gengroup():
1754 1754 # The set of changed files starts empty.
1755 1755 changedfiles = {}
1756 1756 # Create a changenode group generator that will call our functions
1757 1757 # back to lookup the owning changenode and collect information.
1758 1758 group = cl.group(msng_cl_lst, identity,
1759 1759 manifest_and_file_collector(changedfiles))
1760 1760 for chnk in group:
1761 1761 yield chnk
1762 1762
1763 1763 # The list of manifests has been collected by the generator
1764 1764 # calling our functions back.
1765 1765 prune_manifests()
1766 1766 add_extra_nodes(1, msng_mnfst_set)
1767 1767 msng_mnfst_lst = msng_mnfst_set.keys()
1768 1768 # Sort the manifestnodes by revision number.
1769 1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1770 1770 # Create a generator for the manifestnodes that calls our lookup
1771 1771 # and data collection functions back.
1772 1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1773 1773 filenode_collector(changedfiles))
1774 1774 for chnk in group:
1775 1775 yield chnk
1776 1776
1777 1777 # These are no longer needed, dereference and toss the memory for
1778 1778 # them.
1779 1779 msng_mnfst_lst = None
1780 1780 msng_mnfst_set.clear()
1781 1781
1782 1782 if extranodes:
1783 1783 for fname in extranodes:
1784 1784 if isinstance(fname, int):
1785 1785 continue
1786 1786 add_extra_nodes(fname,
1787 1787 msng_filenode_set.setdefault(fname, {}))
1788 1788 changedfiles[fname] = 1
1789 1789 changedfiles = changedfiles.keys()
1790 1790 changedfiles.sort()
1791 1791 # Go through all our files in order sorted by name.
1792 1792 for fname in changedfiles:
1793 1793 filerevlog = self.file(fname)
1794 1794 if filerevlog.count() == 0:
1795 1795 raise util.Abort(_("empty or missing revlog for %s") % fname)
1796 1796 # Toss out the filenodes that the recipient isn't really
1797 1797 # missing.
1798 1798 if fname in msng_filenode_set:
1799 1799 prune_filenodes(fname, filerevlog)
1800 1800 msng_filenode_lst = msng_filenode_set[fname].keys()
1801 1801 else:
1802 1802 msng_filenode_lst = []
1803 1803 # If any filenodes are left, generate the group for them,
1804 1804 # otherwise don't bother.
1805 1805 if len(msng_filenode_lst) > 0:
1806 1806 yield changegroup.chunkheader(len(fname))
1807 1807 yield fname
1808 1808 # Sort the filenodes by their revision #
1809 1809 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1810 1810 # Create a group generator and only pass in a changenode
1811 1811 # lookup function as we need to collect no information
1812 1812 # from filenodes.
1813 1813 group = filerevlog.group(msng_filenode_lst,
1814 1814 lookup_filenode_link_func(fname))
1815 1815 for chnk in group:
1816 1816 yield chnk
1817 1817 if fname in msng_filenode_set:
1818 1818 # Don't need this anymore, toss it to free memory.
1819 1819 del msng_filenode_set[fname]
1820 1820 # Signal that no more groups are left.
1821 1821 yield changegroup.closechunk()
1822 1822
1823 1823 if msng_cl_lst:
1824 1824 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1825 1825
1826 1826 return util.chunkbuffer(gengroup())
1827 1827
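For reference, the chunks yielded by gengroup use a simple length-prefixed framing. The sketch below assumes the historical format (a 4-byte big-endian length that counts itself, with a zero length terminating a group); it is an illustration, not the changegroup module itself.

import struct

def frame(data):
    # assumed framing: length prefix includes its own 4 bytes
    return struct.pack(">l", len(data) + 4) + data

def close():
    return struct.pack(">l", 0)

stream = frame("hello") + frame("world") + close()
pos = 0
while True:
    l = struct.unpack(">l", stream[pos:pos + 4])[0]
    if l == 0:
        break                        # zero-length chunk ends the group
    print stream[pos + 4:pos + l]    # the chunk payload
    pos += l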
1828 1828 def changegroup(self, basenodes, source):
1829 1829 """Generate a changegroup of all nodes that we have that a recipient
1830 1830 doesn't.
1831 1831
1832 1832 This is much easier than the previous function as we can assume that
1833 1833 the recipient has any changenode we aren't sending them."""
1834 1834
1835 1835 self.hook('preoutgoing', throw=True, source=source)
1836 1836
1837 1837 cl = self.changelog
1838 1838 nodes = cl.nodesbetween(basenodes, None)[0]
1839 1839 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1840 1840 self.changegroupinfo(nodes, source)
1841 1841
1842 1842 def identity(x):
1843 1843 return x
1844 1844
1845 1845 def gennodelst(revlog):
1846 1846 for r in xrange(0, revlog.count()):
1847 1847 n = revlog.node(r)
1848 1848 if revlog.linkrev(n) in revset:
1849 1849 yield n
1850 1850
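gennodelst above filters a revlog down to the nodes whose linkrev points into the changesets being sent. A toy equivalent with made-up data:

nodes = ['fa', 'fb', 'fc']               # filelog node by revision
links = {'fa': 0, 'fb': 3, 'fc': 5}      # node -> changelog revision
revset = dict.fromkeys([3, 5])           # changeset revs in the group

def toy_gennodelst(count, node, linkrev):
    for r in xrange(0, count):
        n = node(r)
        if linkrev(n) in revset:
            yield n

assert list(toy_gennodelst(3, nodes.__getitem__, links.__getitem__)) \
       == ['fb', 'fc']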
1851 1851 def changed_file_collector(changedfileset):
1852 1852 def collect_changed_files(clnode):
1853 1853 c = cl.read(clnode)
1854 1854 for fname in c[3]:
1855 1855 changedfileset[fname] = 1
1856 1856 return collect_changed_files
1857 1857
1858 1858 def lookuprevlink_func(revlog):
1859 1859 def lookuprevlink(n):
1860 1860 return cl.node(revlog.linkrev(n))
1861 1861 return lookuprevlink
1862 1862
1863 1863 def gengroup():
1864 1864 # construct a list of all changed files
1865 1865 changedfiles = {}
1866 1866
1867 1867 for chnk in cl.group(nodes, identity,
1868 1868 changed_file_collector(changedfiles)):
1869 1869 yield chnk
1870 1870 changedfiles = changedfiles.keys()
1871 1871 changedfiles.sort()
1872 1872
1873 1873 mnfst = self.manifest
1874 1874 nodeiter = gennodelst(mnfst)
1875 1875 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1876 1876 yield chnk
1877 1877
1878 1878 for fname in changedfiles:
1879 1879 filerevlog = self.file(fname)
1880 1880 if filerevlog.count() == 0:
1881 1881 raise util.Abort(_("empty or missing revlog for %s") % fname)
1882 1882 nodeiter = gennodelst(filerevlog)
1883 1883 nodeiter = list(nodeiter)
1884 1884 if nodeiter:
1885 1885 yield changegroup.chunkheader(len(fname))
1886 1886 yield fname
1887 1887 lookup = lookuprevlink_func(filerevlog)
1888 1888 for chnk in filerevlog.group(nodeiter, lookup):
1889 1889 yield chnk
1890 1890
1891 1891 yield changegroup.closechunk()
1892 1892
1893 1893 if nodes:
1894 1894 self.hook('outgoing', node=hex(nodes[0]), source=source)
1895 1895
1896 1896 return util.chunkbuffer(gengroup())
1897 1897
1898 1898 def addchangegroup(self, source, srctype, url, emptyok=False):
1899 1899 """add changegroup to repo.
1900 1900
1901 1901 return values:
1902 1902 - nothing changed or no source: 0
1903 1903 - more heads than before: 1+added heads (2..n)
1904 1904 - fewer heads than before: -1-removed heads (-2..-n)
1905 1905 - number of heads stays the same: 1
1906 1906 """
1907 1907 def csmap(x):
1908 1908 self.ui.debug(_("add changeset %s\n") % short(x))
1909 1909 return cl.count()
1910 1910
1911 1911 def revmap(x):
1912 1912 return cl.rev(x)
1913 1913
1914 1914 if not source:
1915 1915 return 0
1916 1916
1917 1917 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1918 1918
1919 1919 changesets = files = revisions = 0
1920 1920
1921 1921 # write changelog data to temp files so concurrent readers will not see
1922 1922 # an inconsistent view
1923 1923 cl = self.changelog
1924 1924 cl.delayupdate()
1925 1925 oldheads = len(cl.heads())
1926 1926
1927 1927 tr = self.transaction()
1928 1928 try:
1929 1929 trp = weakref.proxy(tr)
1930 1930 # pull off the changeset group
1931 1931 self.ui.status(_("adding changesets\n"))
1932 1932 cor = cl.count() - 1
1933 1933 chunkiter = changegroup.chunkiter(source)
1934 1934 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1935 1935 raise util.Abort(_("received changelog group is empty"))
1936 1936 cnr = cl.count() - 1
1937 1937 changesets = cnr - cor
1938 1938
1939 1939 # pull off the manifest group
1940 1940 self.ui.status(_("adding manifests\n"))
1941 1941 chunkiter = changegroup.chunkiter(source)
1942 1942 # no need to check for empty manifest group here:
1943 1943 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1944 1944 # no new manifest will be created and the manifest group will
1945 1945 # be empty during the pull
1946 1946 self.manifest.addgroup(chunkiter, revmap, trp)
1947 1947
1948 1948 # process the files
1949 1949 self.ui.status(_("adding file changes\n"))
1950 1950 while 1:
1951 1951 f = changegroup.getchunk(source)
1952 1952 if not f:
1953 1953 break
1954 1954 self.ui.debug(_("adding %s revisions\n") % f)
1955 1955 fl = self.file(f)
1956 1956 o = fl.count()
1957 1957 chunkiter = changegroup.chunkiter(source)
1958 1958 if fl.addgroup(chunkiter, revmap, trp) is None:
1959 1959 raise util.Abort(_("received file revlog group is empty"))
1960 1960 revisions += fl.count() - o
1961 1961 files += 1
1962 1962
1963 1963 # make changelog see real files again
1964 1964 cl.finalize(trp)
1965 1965
1966 1966 newheads = len(self.changelog.heads())
1967 1967 heads = ""
1968 1968 if oldheads and newheads != oldheads:
1969 1969 heads = _(" (%+d heads)") % (newheads - oldheads)
1970 1970
1971 1971 self.ui.status(_("added %d changesets"
1972 1972 " with %d changes to %d files%s\n")
1973 1973 % (changesets, revisions, files, heads))
1974 1974
1975 1975 if changesets > 0:
1976 1976 self.hook('pretxnchangegroup', throw=True,
1977 1977 node=hex(self.changelog.node(cor+1)), source=srctype,
1978 1978 url=url)
1979 1979
1980 1980 tr.close()
1981 1981 finally:
1982 1982 del tr
1983 1983
1984 1984 if changesets > 0:
1985 1985 # forcefully update the on-disk branch cache
1986 1986 self.ui.debug(_("updating the branch cache\n"))
1987 1987 self.branchcache = None
1988 1988 self.branchtags()
1989 1989 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1990 1990 source=srctype, url=url)
1991 1991
1992 1992 for i in xrange(cor + 1, cnr + 1):
1993 1993 self.hook("incoming", node=hex(self.changelog.node(i)),
1994 1994 source=srctype, url=url)
1995 1995
1996 1996 # never return 0 here:
1997 1997 if newheads < oldheads:
1998 1998 return newheads - oldheads - 1
1999 1999 else:
2000 2000 return newheads - oldheads + 1
2001 2001
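The return-value convention from addchangegroup's docstring can be checked with a tiny helper (illustrative only, not part of the class):

def retcode(oldheads, newheads):
    if newheads < oldheads:
        return newheads - oldheads - 1   # -1 - removed heads
    return newheads - oldheads + 1       # 1 + added heads (or just 1)

assert retcode(1, 3) == 3    # two heads added   -> 1 + 2
assert retcode(2, 2) == 1    # head count unchanged
assert retcode(3, 1) == -3   # two heads removed -> -1 - 2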
2002 2002
2003 2003 def stream_in(self, remote):
2004 2004 fp = remote.stream_out()
2005 2005 l = fp.readline()
2006 2006 try:
2007 2007 resp = int(l)
2008 2008 except ValueError:
2009 2009 raise util.UnexpectedOutput(
2010 2010 _('Unexpected response from remote server:'), l)
2011 2011 if resp == 1:
2012 2012 raise util.Abort(_('operation forbidden by server'))
2013 2013 elif resp == 2:
2014 2014 raise util.Abort(_('locking the remote repository failed'))
2015 2015 elif resp != 0:
2016 2016 raise util.Abort(_('the server sent an unknown error code'))
2017 2017 self.ui.status(_('streaming all changes\n'))
2018 2018 l = fp.readline()
2019 2019 try:
2020 2020 total_files, total_bytes = map(int, l.split(' ', 1))
2021 2021 except (ValueError, TypeError):
2022 2022 raise util.UnexpectedOutput(
2023 2023 _('Unexpected response from remote server:'), l)
2024 2024 self.ui.status(_('%d files to transfer, %s of data\n') %
2025 2025 (total_files, util.bytecount(total_bytes)))
2026 2026 start = time.time()
2027 2027 for i in xrange(total_files):
2028 2028 # XXX doesn't support '\n' or '\r' in filenames
2029 2029 l = fp.readline()
2030 2030 try:
2031 2031 name, size = l.split('\0', 1)
2032 2032 size = int(size)
2033 2033 except (ValueError, TypeError):
2034 2034 raise util.UnexpectedOutput(
2035 2035 _('Unexpected response from remote server:'), l)
2036 2036 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2037 2037 ofp = self.sopener(name, 'w')
2038 2038 for chunk in util.filechunkiter(fp, limit=size):
2039 2039 ofp.write(chunk)
2040 2040 ofp.close()
2041 2041 elapsed = time.time() - start
2042 2042 if elapsed <= 0:
2043 2043 elapsed = 0.001
2044 2044 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2045 2045 (util.bytecount(total_bytes), elapsed,
2046 2046 util.bytecount(total_bytes / elapsed)))
2047 2047 self.invalidate()
2048 2048 return len(self.heads()) + 1
2049 2049
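The header parsing in stream_in follows directly from the code above: a status line, a "files bytes" line, then one "name\0size" line per file followed by size bytes of data. A self-contained sketch fed from a StringIO rather than a real remote (the payload is made up):

from StringIO import StringIO

fp = StringIO('0\n2 11\nfoo\x005\nhellobar\x006\nworld!')
resp = int(fp.readline())          # 0 means the server will stream
total_files, total_bytes = map(int, fp.readline().split(' ', 1))
for i in xrange(total_files):
    name, size = fp.readline().split('\0', 1)
    data = fp.read(int(size))
    print name, len(data)          # foo 5 / bar 6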
2050 2050 def clone(self, remote, heads=[], stream=False):
2051 2051 '''clone remote repository.
2052 2052
2053 2053 keyword arguments:
2054 2054 heads: list of revs to clone (forces use of pull)
2055 2055 stream: use streaming clone if possible'''
2056 2056
2057 2057 # now, all clients that can request uncompressed clones can
2058 2058 # read repo formats supported by all servers that can serve
2059 2059 # them.
2060 2060
2061 2061 # if revlog format changes, client will have to check version
2062 2062 # and format flags on "stream" capability, and use
2063 2063 # uncompressed only if compatible.
2064 2064
2065 2065 if stream and not heads and remote.capable('stream'):
2066 2066 return self.stream_in(remote)
2067 2067 return self.pull(remote, heads)
2068 2068
2069 2069 # used to avoid circular references so destructors work
2070 2070 def aftertrans(files):
2071 2071 renamefiles = [tuple(t) for t in files]
2072 2072 def a():
2073 2073 for src, dest in renamefiles:
2074 2074 util.rename(src, dest)
2075 2075 return a
2076 2076
2077 2077 def instance(ui, path, create):
2078 2078 return localrepository(ui, util.drop_scheme('file', path), create)
2079 2079
2080 2080 def islocal(path):
2081 2081 return True
@@ -1,117 +1,117 b''
1 1 marked working directory as branch foo
2 2 foo
3 3 marked working directory as branch bar
4 4 % branch shadowing
5 5 abort: a branch of the same name already exists (use --force to override)
6 6 marked working directory as branch default
7 7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 8 foo
9 9 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 10 (branch merge, don't forget to commit)
11 11 foo
12 12 changeset: 5:5f8fb06e083e
13 13 branch: foo
14 14 tag: tip
15 15 parent: 4:4909a3732169
16 16 parent: 3:bf1bc2f45e83
17 17 user: test
18 18 date: Mon Jan 12 13:46:40 1970 +0000
19 19 summary: merge
20 20
21 21 changeset: 4:4909a3732169
22 22 branch: foo
23 23 parent: 1:b699b1cec9c2
24 24 user: test
25 25 date: Mon Jan 12 13:46:40 1970 +0000
26 26 summary: modify a branch
27 27
28 28 changeset: 3:bf1bc2f45e83
29 29 user: test
30 30 date: Mon Jan 12 13:46:40 1970 +0000
31 31 summary: clear branch name
32 32
33 33 changeset: 2:67ec16bde7f1
34 34 branch: bar
35 35 user: test
36 36 date: Mon Jan 12 13:46:40 1970 +0000
37 37 summary: change branch name
38 38
39 39 changeset: 1:b699b1cec9c2
40 40 branch: foo
41 41 user: test
42 42 date: Mon Jan 12 13:46:40 1970 +0000
43 43 summary: add branch name
44 44
45 45 changeset: 0:be8523e69bf8
46 46 user: test
47 47 date: Mon Jan 12 13:46:40 1970 +0000
48 48 summary: initial
49 49
50 50 foo 5:5f8fb06e083e
51 51 default 3:bf1bc2f45e83 (inactive)
52 52 bar 2:67ec16bde7f1 (inactive)
53 53 foo
54 54 default
55 55 bar
56 56 % test for invalid branch cache
57 57 rolling back last transaction
58 58 changeset: 4:4909a3732169
59 59 branch: foo
60 60 tag: tip
61 61 parent: 1:b699b1cec9c2
62 62 user: test
63 63 date: Mon Jan 12 13:46:40 1970 +0000
64 64 summary: modify a branch
65 65
66 Invalid branch cache: unknown tip
66 invalidating branch cache (tip differs)
67 67 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
68 68 branch: foo
69 69 tag: tip
70 70 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
71 71 parent: -1:0000000000000000000000000000000000000000
72 72 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
73 73 user: test
74 74 date: Mon Jan 12 13:46:40 1970 +0000
75 75 files: a
76 76 extra: branch=foo
77 77 description:
78 78 modify a branch
79 79
80 80
81 81 4:4909a3732169
82 82 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
83 83 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
84 84 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
85 85 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
86 86 % push should update the branch cache
87 87 % pushing just rev 0
88 88 be8523e69bf892e25817fc97187516b3c0804ae4 0
89 89 be8523e69bf892e25817fc97187516b3c0804ae4 default
90 90 % pushing everything
91 91 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
92 92 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
93 93 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
94 94 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
95 95 % update with no arguments: tipmost revision of the current branch
96 96 bf1bc2f45e83
97 97 4909a3732169 (foo) tip
98 98 marked working directory as branch foobar
99 99 abort: branch foobar not found
100 100 % fastforward merge
101 101 marked working directory as branch ff
102 102 adding ff
103 103 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
104 104 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
105 105 (branch merge, don't forget to commit)
106 106 foo
107 107 changeset: 6:f0c74f92a385
108 108 branch: foo
109 109 tag: tip
110 110 parent: 4:4909a3732169
111 111 parent: 5:c420d2121b71
112 112 user: test
113 113 date: Mon Jan 12 13:46:40 1970 +0000
114 114 summary: Merge ff into foo
115 115
116 116 a
117 117 ff