store all heads of a branch in the branch cache...
John Mulligan
r7654:816b708f default
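This changeset replaces the branch cache's one-node-per-branch mapping with a list of every head of the branch, and renames the on-disk file from branch.cache to branchheads.cache (presumably so older code does not misparse the new format). A minimal sketch of the shape change, with invented short node names standing in for the real 20-byte binary node ids:

    # Shape of the branch cache before and after this changeset
    # (node names invented for illustration):
    old_cache = {'default': 'n7', 'stable': 'n5'}            # branch -> tip only
    new_cache = {'default': ['n3', 'n7'], 'stable': ['n5']}  # branch -> all heads,
                                                             # tipmost head last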
@@ -1,2151 +1,2131 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, time, util, extensions, hook, inspect, error
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store', 'fncache')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 if parentui.configbool('format', 'usefncache', True):
39 39 requirements.append("fncache")
40 40 # create an invalid changelog
41 41 self.opener("00changelog.i", "a").write(
42 42 '\0\0\0\2' # represents revlogv2
43 43 ' dummy changelog to prevent using the old repo layout'
44 44 )
45 45 reqfile = self.opener("requires", "w")
46 46 for r in requirements:
47 47 reqfile.write("%s\n" % r)
48 48 reqfile.close()
49 49 else:
50 50 raise error.RepoError(_("repository %s not found") % path)
51 51 elif create:
52 52 raise error.RepoError(_("repository %s already exists") % path)
53 53 else:
54 54 # find requirements
55 55 requirements = []
56 56 try:
57 57 requirements = self.opener("requires").read().splitlines()
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise error.RepoError(_("requirement '%s' not supported") % r)
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64
65 65 self.store = store.store(requirements, self.path, util.opener)
66 66 self.spath = self.store.path
67 67 self.sopener = self.store.opener
68 68 self.sjoin = self.store.join
69 69 self.opener.createmode = self.store.createmode
70 70
71 71 self.ui = ui.ui(parentui=parentui)
72 72 try:
73 73 self.ui.readconfig(self.join("hgrc"), self.root)
74 74 extensions.loadall(self.ui)
75 75 except IOError:
76 76 pass
77 77
78 78 self.tagscache = None
79 79 self._tagstypecache = None
80 80 self.branchcache = None
81 81 self._ubranchcache = None # UTF-8 version of branchcache
82 82 self._branchcachetip = None
83 83 self.nodetagscache = None
84 84 self.filterpats = {}
85 85 self._datafilters = {}
86 86 self._transref = self._lockref = self._wlockref = None
87 87
88 88 def __getattr__(self, name):
89 89 if name == 'changelog':
90 90 self.changelog = changelog.changelog(self.sopener)
91 91 self.sopener.defversion = self.changelog.version
92 92 return self.changelog
93 93 if name == 'manifest':
94 94 self.changelog
95 95 self.manifest = manifest.manifest(self.sopener)
96 96 return self.manifest
97 97 if name == 'dirstate':
98 98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 99 return self.dirstate
100 100 else:
101 101 raise AttributeError(name)
102 102
103 103 def __getitem__(self, changeid):
104 104 if changeid == None:
105 105 return context.workingctx(self)
106 106 return context.changectx(self, changeid)
107 107
108 108 def __nonzero__(self):
109 109 return True
110 110
111 111 def __len__(self):
112 112 return len(self.changelog)
113 113
114 114 def __iter__(self):
115 115 for i in xrange(len(self)):
116 116 yield i
117 117
118 118 def url(self):
119 119 return 'file:' + self.root
120 120
121 121 def hook(self, name, throw=False, **args):
122 122 return hook.hook(self.ui, self, name, throw, **args)
123 123
124 124 tag_disallowed = ':\r\n'
125 125
126 126 def _tag(self, names, node, message, local, user, date, parent=None,
127 127 extra={}):
128 128 use_dirstate = parent is None
129 129
130 130 if isinstance(names, str):
131 131 allchars = names
132 132 names = (names,)
133 133 else:
134 134 allchars = ''.join(names)
135 135 for c in self.tag_disallowed:
136 136 if c in allchars:
137 137 raise util.Abort(_('%r cannot be used in a tag name') % c)
138 138
139 139 for name in names:
140 140 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 141 local=local)
142 142
143 143 def writetags(fp, names, munge, prevtags):
144 144 fp.seek(0, 2)
145 145 if prevtags and prevtags[-1] != '\n':
146 146 fp.write('\n')
147 147 for name in names:
148 148 m = munge and munge(name) or name
149 149 if self._tagstypecache and name in self._tagstypecache:
150 150 old = self.tagscache.get(name, nullid)
151 151 fp.write('%s %s\n' % (hex(old), m))
152 152 fp.write('%s %s\n' % (hex(node), m))
153 153 fp.close()
154 154
155 155 prevtags = ''
156 156 if local:
157 157 try:
158 158 fp = self.opener('localtags', 'r+')
159 159 except IOError, err:
160 160 fp = self.opener('localtags', 'a')
161 161 else:
162 162 prevtags = fp.read()
163 163
164 164 # local tags are stored in the current charset
165 165 writetags(fp, names, None, prevtags)
166 166 for name in names:
167 167 self.hook('tag', node=hex(node), tag=name, local=local)
168 168 return
169 169
170 170 if use_dirstate:
171 171 try:
172 172 fp = self.wfile('.hgtags', 'rb+')
173 173 except IOError, err:
174 174 fp = self.wfile('.hgtags', 'ab')
175 175 else:
176 176 prevtags = fp.read()
177 177 else:
178 178 try:
179 179 prevtags = self.filectx('.hgtags', parent).data()
180 180 except error.LookupError:
181 181 pass
182 182 fp = self.wfile('.hgtags', 'wb')
183 183 if prevtags:
184 184 fp.write(prevtags)
185 185
186 186 # committed tags are stored in UTF-8
187 187 writetags(fp, names, util.fromlocal, prevtags)
188 188
189 189 if use_dirstate and '.hgtags' not in self.dirstate:
190 190 self.add(['.hgtags'])
191 191
192 192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 193 extra=extra)
194 194
195 195 for name in names:
196 196 self.hook('tag', node=hex(node), tag=name, local=local)
197 197
198 198 return tagnode
199 199
200 200 def tag(self, names, node, message, local, user, date):
201 201 '''tag a revision with one or more symbolic names.
202 202
203 203 names is a list of strings or, when adding a single tag, names may be a
204 204 string.
205 205
206 206 if local is True, the tags are stored in a per-repository file.
207 207 otherwise, they are stored in the .hgtags file, and a new
208 208 changeset is committed with the change.
209 209
210 210 keyword arguments:
211 211
212 212 local: whether to store tags in non-version-controlled file
213 213 (default False)
214 214
215 215 message: commit message to use if committing
216 216
217 217 user: name of user to use if committing
218 218
219 219 date: date tuple to use if committing'''
220 220
221 221 for x in self.status()[:5]:
222 222 if '.hgtags' in x:
223 223 raise util.Abort(_('working copy of .hgtags is changed '
224 224 '(please commit .hgtags manually)'))
225 225
226 226 self._tag(names, node, message, local, user, date)
227 227
228 228 def tags(self):
229 229 '''return a mapping of tag to node'''
230 230 if self.tagscache:
231 231 return self.tagscache
232 232
233 233 globaltags = {}
234 234 tagtypes = {}
235 235
236 236 def readtags(lines, fn, tagtype):
237 237 filetags = {}
238 238 count = 0
239 239
240 240 def warn(msg):
241 241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242 242
243 243 for l in lines:
244 244 count += 1
245 245 if not l:
246 246 continue
247 247 s = l.split(" ", 1)
248 248 if len(s) != 2:
249 249 warn(_("cannot parse entry"))
250 250 continue
251 251 node, key = s
252 252 key = util.tolocal(key.strip()) # stored in UTF-8
253 253 try:
254 254 bin_n = bin(node)
255 255 except TypeError:
256 256 warn(_("node '%s' is not well formed") % node)
257 257 continue
258 258 if bin_n not in self.changelog.nodemap:
259 259 warn(_("tag '%s' refers to unknown node") % key)
260 260 continue
261 261
262 262 h = []
263 263 if key in filetags:
264 264 n, h = filetags[key]
265 265 h.append(n)
266 266 filetags[key] = (bin_n, h)
267 267
268 268 for k, nh in filetags.iteritems():
269 269 if k not in globaltags:
270 270 globaltags[k] = nh
271 271 tagtypes[k] = tagtype
272 272 continue
273 273
274 274 # we prefer the global tag if:
275 275 # it supersedes us OR
276 276 # mutual supersedes and it has a higher rank
277 277 # otherwise we win because we're tip-most
278 278 an, ah = nh
279 279 bn, bh = globaltags[k]
280 280 if (bn != an and an in bh and
281 281 (bn not in ah or len(bh) > len(ah))):
282 282 an = bn
283 283 ah.extend([n for n in bh if n not in ah])
284 284 globaltags[k] = an, ah
285 285 tagtypes[k] = tagtype
286 286
287 287 # read the tags file from each head, ending with the tip
288 288 f = None
289 289 for rev, node, fnode in self._hgtagsnodes():
290 290 f = (f and f.filectx(fnode) or
291 291 self.filectx('.hgtags', fileid=fnode))
292 292 readtags(f.data().splitlines(), f, "global")
293 293
294 294 try:
295 295 data = util.fromlocal(self.opener("localtags").read())
296 296 # localtags are stored in the local character set
297 297 # while the internal tag table is stored in UTF-8
298 298 readtags(data.splitlines(), "localtags", "local")
299 299 except IOError:
300 300 pass
301 301
302 302 self.tagscache = {}
303 303 self._tagstypecache = {}
304 304 for k, nh in globaltags.iteritems():
305 305 n = nh[0]
306 306 if n != nullid:
307 307 self.tagscache[k] = n
308 308 self._tagstypecache[k] = tagtypes[k]
309 309 self.tagscache['tip'] = self.changelog.tip()
310 310 return self.tagscache
311 311
312 312 def tagtype(self, tagname):
313 313 '''
314 314 return the type of the given tag. result can be:
315 315
316 316 'local' : a local tag
317 317 'global' : a global tag
318 318 None : tag does not exist
319 319 '''
320 320
321 321 self.tags()
322 322
323 323 return self._tagstypecache.get(tagname)
324 324
325 325 def _hgtagsnodes(self):
326 326 heads = self.heads()
327 327 heads.reverse()
328 328 last = {}
329 329 ret = []
330 330 for node in heads:
331 331 c = self[node]
332 332 rev = c.rev()
333 333 try:
334 334 fnode = c.filenode('.hgtags')
335 335 except error.LookupError:
336 336 continue
337 337 ret.append((rev, node, fnode))
338 338 if fnode in last:
339 339 ret[last[fnode]] = None
340 340 last[fnode] = len(ret) - 1
341 341 return [item for item in ret if item]
342 342
343 343 def tagslist(self):
344 344 '''return a list of tags ordered by revision'''
345 345 l = []
346 346 for t, n in self.tags().iteritems():
347 347 try:
348 348 r = self.changelog.rev(n)
349 349 except:
350 350 r = -2 # sort to the beginning of the list if unknown
351 351 l.append((r, t, n))
352 352 return [(t, n) for r, t, n in util.sort(l)]
353 353
354 354 def nodetags(self, node):
355 355 '''return the tags associated with a node'''
356 356 if not self.nodetagscache:
357 357 self.nodetagscache = {}
358 358 for t, n in self.tags().iteritems():
359 359 self.nodetagscache.setdefault(n, []).append(t)
360 360 return self.nodetagscache.get(node, [])
361 361
362 362 def _branchtags(self, partial, lrev):
363 # TODO: rename this function?
363 364 tiprev = len(self) - 1
364 365 if lrev != tiprev:
365 366 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 367 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367 368
368 369 return partial
369 370
370 def branchtags(self):
371 def _branchheads(self):
371 372 tip = self.changelog.tip()
372 373 if self.branchcache is not None and self._branchcachetip == tip:
373 374 return self.branchcache
374 375
375 376 oldtip = self._branchcachetip
376 377 self._branchcachetip = tip
377 378 if self.branchcache is None:
378 379 self.branchcache = {} # avoid recursion in changectx
379 380 else:
380 381 self.branchcache.clear() # keep using the same dict
381 382 if oldtip is None or oldtip not in self.changelog.nodemap:
382 383 partial, last, lrev = self._readbranchcache()
383 384 else:
384 385 lrev = self.changelog.rev(oldtip)
385 386 partial = self._ubranchcache
386 387
387 388 self._branchtags(partial, lrev)
389 # this private cache holds all heads (not just tips)
390 self._ubranchcache = partial
388 391
389 392 # the branch cache is stored on disk as UTF-8, but in the local
390 393 # charset internally
391 394 for k, v in partial.iteritems():
392 395 self.branchcache[util.tolocal(k)] = v
393 self._ubranchcache = partial
394 396 return self.branchcache
395 397
398
399 def branchtags(self):
400 '''return a dict where branch names map to the tipmost head of
401 the branch'''
402 return dict([(k, v[-1]) for (k, v) in self._branchheads().iteritems()])
403
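The new _branchheads() keeps the full per-branch head lists, and branchtags() becomes a derived view over it. A minimal model of that relationship, using invented node names:

    # The private cache holds every head per branch (tipmost last);
    # branchtags() just picks the last element of each list.
    branchheads = {'default': ['n3', 'n7'], 'stable': ['n5']}
    branchtags = dict([(k, v[-1]) for (k, v) in branchheads.iteritems()])
    assert branchtags == {'default': 'n7', 'stable': 'n5'}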
396 404 def _readbranchcache(self):
397 405 partial = {}
398 406 try:
399 f = self.opener("branch.cache")
407 f = self.opener("branchheads.cache")
400 408 lines = f.read().split('\n')
401 409 f.close()
402 410 except (IOError, OSError):
403 411 return {}, nullid, nullrev
404 412
405 413 try:
406 414 last, lrev = lines.pop(0).split(" ", 1)
407 415 last, lrev = bin(last), int(lrev)
408 416 if lrev >= len(self) or self[lrev].node() != last:
409 417 # invalidate the cache
410 418 raise ValueError('invalidating branch cache (tip differs)')
411 419 for l in lines:
412 420 if not l: continue
413 421 node, label = l.split(" ", 1)
414 partial[label.strip()] = bin(node)
422 partial.setdefault(label.strip(), []).append(bin(node))
415 423 except KeyboardInterrupt:
416 424 raise
417 425 except Exception, inst:
418 426 if self.ui.debugflag:
419 427 self.ui.warn(str(inst), '\n')
420 428 partial, last, lrev = {}, nullid, nullrev
421 429 return partial, last, lrev
422 430
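A sketch of the on-disk format _readbranchcache now expects: the first line is "<tip-hex> <tip-rev>", followed by one "<node-hex> <branch>" line per head, so a branch can occupy several lines. The standalone parser below mirrors the loop above under that assumption (the hashes in the comment are invented):

    import binascii

    def parse_branchheads(data):
        # data is the contents of branchheads.cache, e.g. (invented hashes):
        #   f6c1d2... 42
        #   09a377... default
        #   f6c1d2... default
        #   5be01c... stable
        lines = data.split('\n')
        last, lrev = lines.pop(0).split(" ", 1)
        tipnode, tiprev = binascii.unhexlify(last), int(lrev)
        partial = {}
        for l in lines:
            if not l:
                continue
            node, label = l.split(" ", 1)
            # a branch may now occupy several lines: one per head
            partial.setdefault(label.strip(), []).append(binascii.unhexlify(node))
        return partial, tipnode, tiprev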
423 431 def _writebranchcache(self, branches, tip, tiprev):
424 432 try:
425 f = self.opener("branch.cache", "w", atomictemp=True)
433 f = self.opener("branchheads.cache", "w", atomictemp=True)
426 434 f.write("%s %s\n" % (hex(tip), tiprev))
427 for label, node in branches.iteritems():
428 f.write("%s %s\n" % (hex(node), label))
435 for label, nodes in branches.iteritems():
436 for node in nodes:
437 f.write("%s %s\n" % (hex(node), label))
429 438 f.rename()
430 439 except (IOError, OSError):
431 440 pass
432 441
433 442 def _updatebranchcache(self, partial, start, end):
434 443 for r in xrange(start, end):
435 444 c = self[r]
436 445 b = c.branch()
437 partial[b] = c.node()
446 bheads = partial.setdefault(b, [])
447 bheads.append(c.node())
448 for p in c.parents():
449 pn = p.node()
450 if pn in bheads:
451 bheads.remove(pn)
438 452
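To see why this incremental pass leaves exactly the branch heads, here is a self-contained toy model of the loop above: commits are fed in revision order, each becomes a head of its branch, and any parent that was provisionally a head is struck off (node names and history invented):

    def update_branch_heads(partial, commits):
        # commits: iterable of (node, branch, parent_nodes) in revision order
        for node, branch, parents in commits:
            bheads = partial.setdefault(branch, [])
            bheads.append(node)
            for pn in parents:
                if pn in bheads:
                    bheads.remove(pn)
        return partial

    heads = update_branch_heads({}, [
        ('a', 'default', []),
        ('b', 'default', ['a']),   # 'a' stops being a head
        ('c', 'default', ['a']),   # second child of 'a': a real fork
    ])
    assert heads == {'default': ['b', 'c']}   # two heads, tipmost last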
439 453 def lookup(self, key):
440 454 if isinstance(key, int):
441 455 return self.changelog.node(key)
442 456 elif key == '.':
443 457 return self.dirstate.parents()[0]
444 458 elif key == 'null':
445 459 return nullid
446 460 elif key == 'tip':
447 461 return self.changelog.tip()
448 462 n = self.changelog._match(key)
449 463 if n:
450 464 return n
451 465 if key in self.tags():
452 466 return self.tags()[key]
453 467 if key in self.branchtags():
454 468 return self.branchtags()[key]
455 469 n = self.changelog._partialmatch(key)
456 470 if n:
457 471 return n
458 472 try:
459 473 if len(key) == 20:
460 474 key = hex(key)
461 475 except:
462 476 pass
463 477 raise error.RepoError(_("unknown revision '%s'") % key)
464 478
465 479 def local(self):
466 480 return True
467 481
468 482 def join(self, f):
469 483 return os.path.join(self.path, f)
470 484
471 485 def wjoin(self, f):
472 486 return os.path.join(self.root, f)
473 487
474 488 def rjoin(self, f):
475 489 return os.path.join(self.root, util.pconvert(f))
476 490
477 491 def file(self, f):
478 492 if f[0] == '/':
479 493 f = f[1:]
480 494 return filelog.filelog(self.sopener, f)
481 495
482 496 def changectx(self, changeid):
483 497 return self[changeid]
484 498
485 499 def parents(self, changeid=None):
486 500 '''get list of changectxs for parents of changeid'''
487 501 return self[changeid].parents()
488 502
489 503 def filectx(self, path, changeid=None, fileid=None):
490 504 """changeid can be a changeset revision, node, or tag.
491 505 fileid can be a file revision or node."""
492 506 return context.filectx(self, path, changeid, fileid)
493 507
494 508 def getcwd(self):
495 509 return self.dirstate.getcwd()
496 510
497 511 def pathto(self, f, cwd=None):
498 512 return self.dirstate.pathto(f, cwd)
499 513
500 514 def wfile(self, f, mode='r'):
501 515 return self.wopener(f, mode)
502 516
503 517 def _link(self, f):
504 518 return os.path.islink(self.wjoin(f))
505 519
506 520 def _filter(self, filter, filename, data):
507 521 if filter not in self.filterpats:
508 522 l = []
509 523 for pat, cmd in self.ui.configitems(filter):
510 524 if cmd == '!':
511 525 continue
512 526 mf = util.matcher(self.root, "", [pat], [], [])[1]
513 527 fn = None
514 528 params = cmd
515 529 for name, filterfn in self._datafilters.iteritems():
516 530 if cmd.startswith(name):
517 531 fn = filterfn
518 532 params = cmd[len(name):].lstrip()
519 533 break
520 534 if not fn:
521 535 fn = lambda s, c, **kwargs: util.filter(s, c)
522 536 # Wrap old filters not supporting keyword arguments
523 537 if not inspect.getargspec(fn)[2]:
524 538 oldfn = fn
525 539 fn = lambda s, c, **kwargs: oldfn(s, c)
526 540 l.append((mf, fn, params))
527 541 self.filterpats[filter] = l
528 542
529 543 for mf, fn, cmd in self.filterpats[filter]:
530 544 if mf(filename):
531 545 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
532 546 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
533 547 break
534 548
535 549 return data
536 550
537 551 def adddatafilter(self, name, filter):
538 552 self._datafilters[name] = filter
539 553
540 554 def wread(self, filename):
541 555 if self._link(filename):
542 556 data = os.readlink(self.wjoin(filename))
543 557 else:
544 558 data = self.wopener(filename, 'r').read()
545 559 return self._filter("encode", filename, data)
546 560
547 561 def wwrite(self, filename, data, flags):
548 562 data = self._filter("decode", filename, data)
549 563 try:
550 564 os.unlink(self.wjoin(filename))
551 565 except OSError:
552 566 pass
553 567 if 'l' in flags:
554 568 self.wopener.symlink(data, filename)
555 569 else:
556 570 self.wopener(filename, 'w').write(data)
557 571 if 'x' in flags:
558 572 util.set_flags(self.wjoin(filename), False, True)
559 573
560 574 def wwritedata(self, filename, data):
561 575 return self._filter("decode", filename, data)
562 576
563 577 def transaction(self):
564 578 if self._transref and self._transref():
565 579 return self._transref().nest()
566 580
567 581 # abort here if the journal already exists
568 582 if os.path.exists(self.sjoin("journal")):
569 583 raise error.RepoError(_("journal already exists - run hg recover"))
570 584
571 585 # save dirstate for rollback
572 586 try:
573 587 ds = self.opener("dirstate").read()
574 588 except IOError:
575 589 ds = ""
576 590 self.opener("journal.dirstate", "w").write(ds)
577 591 self.opener("journal.branch", "w").write(self.dirstate.branch())
578 592
579 593 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 594 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 595 (self.join("journal.branch"), self.join("undo.branch"))]
582 596 tr = transaction.transaction(self.ui.warn, self.sopener,
583 597 self.sjoin("journal"),
584 598 aftertrans(renames),
585 599 self.store.createmode)
586 600 self._transref = weakref.ref(tr)
587 601 return tr
588 602
589 603 def recover(self):
590 604 l = self.lock()
591 605 try:
592 606 if os.path.exists(self.sjoin("journal")):
593 607 self.ui.status(_("rolling back interrupted transaction\n"))
594 608 transaction.rollback(self.sopener, self.sjoin("journal"))
595 609 self.invalidate()
596 610 return True
597 611 else:
598 612 self.ui.warn(_("no interrupted transaction available\n"))
599 613 return False
600 614 finally:
601 615 del l
602 616
603 617 def rollback(self):
604 618 wlock = lock = None
605 619 try:
606 620 wlock = self.wlock()
607 621 lock = self.lock()
608 622 if os.path.exists(self.sjoin("undo")):
609 623 self.ui.status(_("rolling back last transaction\n"))
610 624 transaction.rollback(self.sopener, self.sjoin("undo"))
611 625 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 626 try:
613 627 branch = self.opener("undo.branch").read()
614 628 self.dirstate.setbranch(branch)
615 629 except IOError:
616 630 self.ui.warn(_("Named branch could not be reset, "
617 631 "current branch still is: %s\n")
618 632 % util.tolocal(self.dirstate.branch()))
619 633 self.invalidate()
620 634 self.dirstate.invalidate()
621 635 else:
622 636 self.ui.warn(_("no rollback information available\n"))
623 637 finally:
624 638 del lock, wlock
625 639
626 640 def invalidate(self):
627 641 for a in "changelog manifest".split():
628 642 if a in self.__dict__:
629 643 delattr(self, a)
630 644 self.tagscache = None
631 645 self._tagstypecache = None
632 646 self.nodetagscache = None
633 647 self.branchcache = None
634 648 self._ubranchcache = None
635 649 self._branchcachetip = None
636 650
637 651 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 652 try:
639 653 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 654 except error.LockHeld, inst:
641 655 if not wait:
642 656 raise
643 657 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 658 (desc, inst.locker))
645 659 # default to 600 seconds timeout
646 660 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 661 releasefn, desc=desc)
648 662 if acquirefn:
649 663 acquirefn()
650 664 return l
651 665
652 666 def lock(self, wait=True):
653 667 if self._lockref and self._lockref():
654 668 return self._lockref()
655 669
656 670 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
657 671 _('repository %s') % self.origroot)
658 672 self._lockref = weakref.ref(l)
659 673 return l
660 674
661 675 def wlock(self, wait=True):
662 676 if self._wlockref and self._wlockref():
663 677 return self._wlockref()
664 678
665 679 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 680 self.dirstate.invalidate, _('working directory of %s') %
667 681 self.origroot)
668 682 self._wlockref = weakref.ref(l)
669 683 return l
670 684
671 685 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
672 686 """
673 687 commit an individual file as part of a larger transaction
674 688 """
675 689
676 690 fn = fctx.path()
677 691 t = fctx.data()
678 692 fl = self.file(fn)
679 693 fp1 = manifest1.get(fn, nullid)
680 694 fp2 = manifest2.get(fn, nullid)
681 695
682 696 meta = {}
683 697 cp = fctx.renamed()
684 698 if cp and cp[0] != fn:
685 699 # Mark the new revision of this file as a copy of another
686 700 # file. This copy data will effectively act as a parent
687 701 # of this new revision. If this is a merge, the first
688 702 # parent will be the nullid (meaning "look up the copy data")
689 703 # and the second one will be the other parent. For example:
690 704 #
691 705 # 0 --- 1 --- 3 rev1 changes file foo
692 706 # \ / rev2 renames foo to bar and changes it
693 707 # \- 2 -/ rev3 should have bar with all changes and
694 708 # should record that bar descends from
695 709 # bar in rev2 and foo in rev1
696 710 #
697 711 # this allows this merge to succeed:
698 712 #
699 713 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
700 714 # \ / merging rev3 and rev4 should use bar@rev2
701 715 # \- 2 --- 4 as the merge base
702 716 #
703 717
704 718 cf = cp[0]
705 719 cr = manifest1.get(cf)
706 720 nfp = fp2
707 721
708 722 if manifest2: # branch merge
709 723 if fp2 == nullid: # copied on remote side
710 724 if fp1 != nullid or cf in manifest2:
711 725 cr = manifest2[cf]
712 726 nfp = fp1
713 727
714 728 # find source in nearest ancestor if we've lost track
715 729 if not cr:
716 730 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
717 731 (fn, cf))
718 732 for a in self['.'].ancestors():
719 733 if cf in a:
720 734 cr = a[cf].filenode()
721 735 break
722 736
723 737 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
724 738 meta["copy"] = cf
725 739 meta["copyrev"] = hex(cr)
726 740 fp1, fp2 = nullid, nfp
727 741 elif fp2 != nullid:
728 742 # is one parent an ancestor of the other?
729 743 fpa = fl.ancestor(fp1, fp2)
730 744 if fpa == fp1:
731 745 fp1, fp2 = fp2, nullid
732 746 elif fpa == fp2:
733 747 fp2 = nullid
734 748
735 749 # is the file unmodified from the parent? report existing entry
736 750 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
737 751 return fp1
738 752
739 753 changelist.append(fn)
740 754 return fl.add(t, meta, tr, linkrev, fp1, fp2)
741 755
742 756 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
743 757 if p1 is None:
744 758 p1, p2 = self.dirstate.parents()
745 759 return self.commit(files=files, text=text, user=user, date=date,
746 760 p1=p1, p2=p2, extra=extra, empty_ok=True)
747 761
748 762 def commit(self, files=None, text="", user=None, date=None,
749 763 match=None, force=False, force_editor=False,
750 764 p1=None, p2=None, extra={}, empty_ok=False):
751 765 wlock = lock = None
752 766 if files:
753 767 files = util.unique(files)
754 768 try:
755 769 wlock = self.wlock()
756 770 lock = self.lock()
757 771 use_dirstate = (p1 is None) # not rawcommit
758 772
759 773 if use_dirstate:
760 774 p1, p2 = self.dirstate.parents()
761 775 update_dirstate = True
762 776
763 777 if (not force and p2 != nullid and
764 778 (match and (match.files() or match.anypats()))):
765 779 raise util.Abort(_('cannot partially commit a merge '
766 780 '(do not specify files or patterns)'))
767 781
768 782 if files:
769 783 modified, removed = [], []
770 784 for f in files:
771 785 s = self.dirstate[f]
772 786 if s in 'nma':
773 787 modified.append(f)
774 788 elif s == 'r':
775 789 removed.append(f)
776 790 else:
777 791 self.ui.warn(_("%s not tracked!\n") % f)
778 792 changes = [modified, [], removed, [], []]
779 793 else:
780 794 changes = self.status(match=match)
781 795 else:
782 796 p1, p2 = p1, p2 or nullid
783 797 update_dirstate = (self.dirstate.parents()[0] == p1)
784 798 changes = [files, [], [], [], []]
785 799
786 800 ms = merge_.mergestate(self)
787 801 for f in changes[0]:
788 802 if f in ms and ms[f] == 'u':
789 803 raise util.Abort(_("unresolved merge conflicts "
790 804 "(see hg resolve)"))
791 805 wctx = context.workingctx(self, (p1, p2), text, user, date,
792 806 extra, changes)
793 807 return self._commitctx(wctx, force, force_editor, empty_ok,
794 808 use_dirstate, update_dirstate)
795 809 finally:
796 810 del lock, wlock
797 811
798 812 def commitctx(self, ctx):
799 813 """Add a new revision to current repository.
800 814
801 815 Revision information is passed in the context.memctx argument.
802 816 commitctx() does not touch the working directory.
803 817 """
804 818 wlock = lock = None
805 819 try:
806 820 wlock = self.wlock()
807 821 lock = self.lock()
808 822 return self._commitctx(ctx, force=True, force_editor=False,
809 823 empty_ok=True, use_dirstate=False,
810 824 update_dirstate=False)
811 825 finally:
812 826 del lock, wlock
813 827
814 828 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
815 829 use_dirstate=True, update_dirstate=True):
816 830 tr = None
817 831 valid = 0 # don't save the dirstate if this isn't set
818 832 try:
819 833 commit = util.sort(wctx.modified() + wctx.added())
820 834 remove = wctx.removed()
821 835 extra = wctx.extra().copy()
822 836 branchname = extra['branch']
823 837 user = wctx.user()
824 838 text = wctx.description()
825 839
826 840 p1, p2 = [p.node() for p in wctx.parents()]
827 841 c1 = self.changelog.read(p1)
828 842 c2 = self.changelog.read(p2)
829 843 m1 = self.manifest.read(c1[0]).copy()
830 844 m2 = self.manifest.read(c2[0])
831 845
832 846 if use_dirstate:
833 847 oldname = c1[5].get("branch") # stored in UTF-8
834 848 if (not commit and not remove and not force and p2 == nullid
835 849 and branchname == oldname):
836 850 self.ui.status(_("nothing changed\n"))
837 851 return None
838 852
839 853 xp1 = hex(p1)
840 854 if p2 == nullid: xp2 = ''
841 855 else: xp2 = hex(p2)
842 856
843 857 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
844 858
845 859 tr = self.transaction()
846 860 trp = weakref.proxy(tr)
847 861
848 862 # check in files
849 863 new = {}
850 864 changed = []
851 865 linkrev = len(self)
852 866 for f in commit:
853 867 self.ui.note(f + "\n")
854 868 try:
855 869 fctx = wctx.filectx(f)
856 870 newflags = fctx.flags()
857 871 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
858 872 if ((not changed or changed[-1] != f) and
859 873 m2.get(f) != new[f]):
860 874 # mention the file in the changelog if some
861 875 # flag changed, even if there was no content
862 876 # change.
863 877 if m1.flags(f) != newflags:
864 878 changed.append(f)
865 879 m1.set(f, newflags)
866 880 if use_dirstate:
867 881 self.dirstate.normal(f)
868 882
869 883 except (OSError, IOError):
870 884 if use_dirstate:
871 885 self.ui.warn(_("trouble committing %s!\n") % f)
872 886 raise
873 887 else:
874 888 remove.append(f)
875 889
876 890 updated, added = [], []
877 891 for f in util.sort(changed):
878 892 if f in m1 or f in m2:
879 893 updated.append(f)
880 894 else:
881 895 added.append(f)
882 896
883 897 # update manifest
884 898 m1.update(new)
885 899 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
886 900 removed1 = []
887 901
888 902 for f in removed:
889 903 if f in m1:
890 904 del m1[f]
891 905 removed1.append(f)
892 906 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
893 907 (new, removed1))
894 908
895 909 # add changeset
896 910 if (not empty_ok and not text) or force_editor:
897 911 edittext = []
898 912 if text:
899 913 edittext.append(text)
900 914 edittext.append("")
901 915 edittext.append("") # Empty line between message and comments.
902 916 edittext.append(_("HG: Enter commit message."
903 917 " Lines beginning with 'HG:' are removed."))
904 918 edittext.append("HG: --")
905 919 edittext.append("HG: user: %s" % user)
906 920 if p2 != nullid:
907 921 edittext.append("HG: branch merge")
908 922 if branchname:
909 923 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
910 924 edittext.extend(["HG: added %s" % f for f in added])
911 925 edittext.extend(["HG: changed %s" % f for f in updated])
912 926 edittext.extend(["HG: removed %s" % f for f in removed])
913 927 if not added and not updated and not removed:
914 928 edittext.append("HG: no files changed")
915 929 edittext.append("")
916 930 # run editor in the repository root
917 931 olddir = os.getcwd()
918 932 os.chdir(self.root)
919 933 text = self.ui.edit("\n".join(edittext), user)
920 934 os.chdir(olddir)
921 935
922 936 lines = [line.rstrip() for line in text.rstrip().splitlines()]
923 937 while lines and not lines[0]:
924 938 del lines[0]
925 939 if not lines and use_dirstate:
926 940 raise util.Abort(_("empty commit message"))
927 941 text = '\n'.join(lines)
928 942
929 943 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
930 944 user, wctx.date(), extra)
931 945 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
932 946 parent2=xp2)
933 947 tr.close()
934 948
935 949 if self.branchcache:
936 950 self.branchtags()
937 951
938 952 if use_dirstate or update_dirstate:
939 953 self.dirstate.setparents(n)
940 954 if use_dirstate:
941 955 for f in removed:
942 956 self.dirstate.forget(f)
943 957 valid = 1 # our dirstate updates are complete
944 958
945 959 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
946 960 return n
947 961 finally:
948 962 if not valid: # don't save our updated dirstate
949 963 self.dirstate.invalidate()
950 964 del tr
951 965
952 966 def walk(self, match, node=None):
953 967 '''
954 968 walk recursively through the directory tree or a given
955 969 changeset, finding all files matched by the match
956 970 function
957 971 '''
958 972 return self[node].walk(match)
959 973
960 974 def status(self, node1='.', node2=None, match=None,
961 975 ignored=False, clean=False, unknown=False):
962 976 """return status of files between two nodes or node and working directory
963 977
964 978 If node1 is None, use the first dirstate parent instead.
965 979 If node2 is None, compare node1 with working directory.
966 980 """
967 981
968 982 def mfmatches(ctx):
969 983 mf = ctx.manifest().copy()
970 984 for fn in mf.keys():
971 985 if not match(fn):
972 986 del mf[fn]
973 987 return mf
974 988
975 989 if isinstance(node1, context.changectx):
976 990 ctx1 = node1
977 991 else:
978 992 ctx1 = self[node1]
979 993 if isinstance(node2, context.changectx):
980 994 ctx2 = node2
981 995 else:
982 996 ctx2 = self[node2]
983 997
984 998 working = ctx2.rev() is None
985 999 parentworking = working and ctx1 == self['.']
986 1000 match = match or match_.always(self.root, self.getcwd())
987 1001 listignored, listclean, listunknown = ignored, clean, unknown
988 1002
989 1003 # load earliest manifest first for caching reasons
990 1004 if not working and ctx2.rev() < ctx1.rev():
991 1005 ctx2.manifest()
992 1006
993 1007 if not parentworking:
994 1008 def bad(f, msg):
995 1009 if f not in ctx1:
996 1010 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
997 1011 return False
998 1012 match.bad = bad
999 1013
1000 1014 if working: # we need to scan the working dir
1001 1015 s = self.dirstate.status(match, listignored, listclean, listunknown)
1002 1016 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1003 1017
1004 1018 # check for any possibly clean files
1005 1019 if parentworking and cmp:
1006 1020 fixup = []
1007 1021 # do a full compare of any files that might have changed
1008 1022 for f in cmp:
1009 1023 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1010 1024 or ctx1[f].cmp(ctx2[f].data())):
1011 1025 modified.append(f)
1012 1026 else:
1013 1027 fixup.append(f)
1014 1028
1015 1029 if listclean:
1016 1030 clean += fixup
1017 1031
1018 1032 # update dirstate for files that are actually clean
1019 1033 if fixup:
1020 1034 wlock = None
1021 1035 try:
1022 1036 try:
1023 1037 wlock = self.wlock(False)
1024 1038 for f in fixup:
1025 1039 self.dirstate.normal(f)
1026 1040 except lock.LockError:
1027 1041 pass
1028 1042 finally:
1029 1043 del wlock
1030 1044
1031 1045 if not parentworking:
1032 1046 mf1 = mfmatches(ctx1)
1033 1047 if working:
1034 1048 # we are comparing working dir against non-parent
1035 1049 # generate a pseudo-manifest for the working dir
1036 1050 mf2 = mfmatches(self['.'])
1037 1051 for f in cmp + modified + added:
1038 1052 mf2[f] = None
1039 1053 mf2.set(f, ctx2.flags(f))
1040 1054 for f in removed:
1041 1055 if f in mf2:
1042 1056 del mf2[f]
1043 1057 else:
1044 1058 # we are comparing two revisions
1045 1059 deleted, unknown, ignored = [], [], []
1046 1060 mf2 = mfmatches(ctx2)
1047 1061
1048 1062 modified, added, clean = [], [], []
1049 1063 for fn in mf2:
1050 1064 if fn in mf1:
1051 1065 if (mf1.flags(fn) != mf2.flags(fn) or
1052 1066 (mf1[fn] != mf2[fn] and
1053 1067 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1054 1068 modified.append(fn)
1055 1069 elif listclean:
1056 1070 clean.append(fn)
1057 1071 del mf1[fn]
1058 1072 else:
1059 1073 added.append(fn)
1060 1074 removed = mf1.keys()
1061 1075
1062 1076 r = modified, added, removed, deleted, unknown, ignored, clean
1063 1077 [l.sort() for l in r]
1064 1078 return r
1065 1079
1066 1080 def add(self, list):
1067 1081 wlock = self.wlock()
1068 1082 try:
1069 1083 rejected = []
1070 1084 for f in list:
1071 1085 p = self.wjoin(f)
1072 1086 try:
1073 1087 st = os.lstat(p)
1074 1088 except:
1075 1089 self.ui.warn(_("%s does not exist!\n") % f)
1076 1090 rejected.append(f)
1077 1091 continue
1078 1092 if st.st_size > 10000000:
1079 1093 self.ui.warn(_("%s: files over 10MB may cause memory and"
1080 1094 " performance problems\n"
1081 1095 "(use 'hg revert %s' to unadd the file)\n")
1082 1096 % (f, f))
1083 1097 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1084 1098 self.ui.warn(_("%s not added: only files and symlinks "
1085 1099 "supported currently\n") % f)
1086 1100 rejected.append(p)
1087 1101 elif self.dirstate[f] in 'amn':
1088 1102 self.ui.warn(_("%s already tracked!\n") % f)
1089 1103 elif self.dirstate[f] == 'r':
1090 1104 self.dirstate.normallookup(f)
1091 1105 else:
1092 1106 self.dirstate.add(f)
1093 1107 return rejected
1094 1108 finally:
1095 1109 del wlock
1096 1110
1097 1111 def forget(self, list):
1098 1112 wlock = self.wlock()
1099 1113 try:
1100 1114 for f in list:
1101 1115 if self.dirstate[f] != 'a':
1102 1116 self.ui.warn(_("%s not added!\n") % f)
1103 1117 else:
1104 1118 self.dirstate.forget(f)
1105 1119 finally:
1106 1120 del wlock
1107 1121
1108 1122 def remove(self, list, unlink=False):
1109 1123 wlock = None
1110 1124 try:
1111 1125 if unlink:
1112 1126 for f in list:
1113 1127 try:
1114 1128 util.unlink(self.wjoin(f))
1115 1129 except OSError, inst:
1116 1130 if inst.errno != errno.ENOENT:
1117 1131 raise
1118 1132 wlock = self.wlock()
1119 1133 for f in list:
1120 1134 if unlink and os.path.exists(self.wjoin(f)):
1121 1135 self.ui.warn(_("%s still exists!\n") % f)
1122 1136 elif self.dirstate[f] == 'a':
1123 1137 self.dirstate.forget(f)
1124 1138 elif f not in self.dirstate:
1125 1139 self.ui.warn(_("%s not tracked!\n") % f)
1126 1140 else:
1127 1141 self.dirstate.remove(f)
1128 1142 finally:
1129 1143 del wlock
1130 1144
1131 1145 def undelete(self, list):
1132 1146 wlock = None
1133 1147 try:
1134 1148 manifests = [self.manifest.read(self.changelog.read(p)[0])
1135 1149 for p in self.dirstate.parents() if p != nullid]
1136 1150 wlock = self.wlock()
1137 1151 for f in list:
1138 1152 if self.dirstate[f] != 'r':
1139 1153 self.ui.warn(_("%s not removed!\n") % f)
1140 1154 else:
1141 1155 m = f in manifests[0] and manifests[0] or manifests[1]
1142 1156 t = self.file(f).read(m[f])
1143 1157 self.wwrite(f, t, m.flags(f))
1144 1158 self.dirstate.normal(f)
1145 1159 finally:
1146 1160 del wlock
1147 1161
1148 1162 def copy(self, source, dest):
1149 1163 wlock = None
1150 1164 try:
1151 1165 p = self.wjoin(dest)
1152 1166 if not (os.path.exists(p) or os.path.islink(p)):
1153 1167 self.ui.warn(_("%s does not exist!\n") % dest)
1154 1168 elif not (os.path.isfile(p) or os.path.islink(p)):
1155 1169 self.ui.warn(_("copy failed: %s is not a file or a "
1156 1170 "symbolic link\n") % dest)
1157 1171 else:
1158 1172 wlock = self.wlock()
1159 1173 if self.dirstate[dest] in '?r':
1160 1174 self.dirstate.add(dest)
1161 1175 self.dirstate.copy(source, dest)
1162 1176 finally:
1163 1177 del wlock
1164 1178
1165 1179 def heads(self, start=None):
1166 1180 heads = self.changelog.heads(start)
1167 1181 # sort the output in rev descending order
1168 1182 heads = [(-self.changelog.rev(h), h) for h in heads]
1169 1183 return [n for (r, n) in util.sort(heads)]
1170 1184
1171 1185 def branchheads(self, branch=None, start=None):
1172 1186 if branch is None:
1173 1187 branch = self[None].branch()
1174 branches = self.branchtags()
1188 branches = self._branchheads()
1175 1189 if branch not in branches:
1176 1190 return []
1177 # The basic algorithm is this:
1178 #
1179 # Start from the branch tip since there are no later revisions that can
1180 # possibly be in this branch, and the tip is a guaranteed head.
1181 #
1182 # Remember the tip's parents as the first ancestors, since these by
1183 # definition are not heads.
1184 #
1185 # Step backwards from the branch tip through all the revisions. We are
1186 # guaranteed by the rules of Mercurial that we will now be visiting the
1187 # nodes in reverse topological order (children before parents).
1188 #
1189 # If a revision is one of the ancestors of a head then we can toss it
1190 # out of the ancestors set (we've already found it and won't be
1191 # visiting it again) and put its parents in the ancestors set.
1192 #
1193 # Otherwise, if a revision is in the branch it's another head, since it
1194 # wasn't in the ancestor list of an existing head. So add it to the
1195 # head list, and add its parents to the ancestor list.
1196 #
1197 # If it is not in the branch ignore it.
1198 #
1199 # Once we have a list of heads, use nodesbetween to filter out all the
1200 # heads that cannot be reached from startrev. There may be a more
1201 # efficient way to do this as part of the previous algorithm.
1202
1203 set = util.set
1204 heads = [self.changelog.rev(branches[branch])]
1205 # Don't care if ancestors contains nullrev or not.
1206 ancestors = set(self.changelog.parentrevs(heads[0]))
1207 for rev in xrange(heads[0] - 1, nullrev, -1):
1208 if rev in ancestors:
1209 ancestors.update(self.changelog.parentrevs(rev))
1210 ancestors.remove(rev)
1211 elif self[rev].branch() == branch:
1212 heads.append(rev)
1213 ancestors.update(self.changelog.parentrevs(rev))
1214 heads = [self.changelog.node(rev) for rev in heads]
1191 bheads = branches[branch]
1192 # the cache returns heads ordered lowest to highest
1193 bheads.reverse()
1215 1194 if start is not None:
1216 heads = self.changelog.nodesbetween([start], heads)[2]
1217 return heads
1195 # filter out the heads that cannot be reached from startrev
1196 bheads = self.changelog.nodesbetween([start], bheads)[2]
1197 return bheads
1218 1198
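Where the deleted block above rebuilt heads with a reverse-topological scan of the whole changelog, the new code is a cache lookup plus an optional reachability filter. A hedged sketch of that flow; reachable_from is an invented stand-in for the real changelog.nodesbetween call:

    def branch_heads(cache, branch, start=None, reachable_from=None):
        # cache maps branch -> head nodes, lowest to highest revision
        if branch not in cache:
            return []
        bheads = list(cache[branch])
        bheads.reverse()              # callers expect the tipmost head first
        if start is not None:
            # keep only heads that descend from start
            bheads = [h for h in bheads if reachable_from(start, h)]
        return bheads

    cache = {'default': ['n3', 'n7']}
    assert branch_heads(cache, 'default') == ['n7', 'n3']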
1219 1199 def branches(self, nodes):
1220 1200 if not nodes:
1221 1201 nodes = [self.changelog.tip()]
1222 1202 b = []
1223 1203 for n in nodes:
1224 1204 t = n
1225 1205 while 1:
1226 1206 p = self.changelog.parents(n)
1227 1207 if p[1] != nullid or p[0] == nullid:
1228 1208 b.append((t, n, p[0], p[1]))
1229 1209 break
1230 1210 n = p[0]
1231 1211 return b
1232 1212
1233 1213 def between(self, pairs):
1234 1214 r = []
1235 1215
1236 1216 for top, bottom in pairs:
1237 1217 n, l, i = top, [], 0
1238 1218 f = 1
1239 1219
1240 1220 while n != bottom:
1241 1221 p = self.changelog.parents(n)[0]
1242 1222 if i == f:
1243 1223 l.append(n)
1244 1224 f = f * 2
1245 1225 n = p
1246 1226 i += 1
1247 1227
1248 1228 r.append(l)
1249 1229
1250 1230 return r
1251 1231
1252 1232 def findincoming(self, remote, base=None, heads=None, force=False):
1253 1233 """Return list of roots of the subsets of missing nodes from remote
1254 1234
1255 1235 If base dict is specified, assume that these nodes and their parents
1256 1236 exist on the remote side and that no child of a node of base exists
1257 1237 in both remote and self.
1258 1238 Furthermore base will be updated to include the nodes that exists
1259 1239 in self and remote but no children exists in self and remote.
1260 1240 If a list of heads is specified, return only nodes which are heads
1261 1241 or ancestors of these heads.
1262 1242
1263 1243 All the ancestors of base are in self and in remote.
1264 1244 All the descendants of the list returned are missing in self.
1265 1245 (and so we know that the rest of the nodes are missing in remote, see
1266 1246 outgoing)
1267 1247 """
1268 1248 return self.findcommonincoming(remote, base, heads, force)[1]
1269 1249
1270 1250 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1271 1251 """Return a tuple (common, missing roots, heads) used to identify
1272 1252 missing nodes from remote.
1273 1253
1274 1254 If base dict is specified, assume that these nodes and their parents
1275 1255 exist on the remote side and that no child of a node of base exists
1276 1256 in both remote and self.
1277 1257 Furthermore base will be updated to include the nodes that exists
1278 1258 in self and remote but no children exists in self and remote.
1279 1259 If a list of heads is specified, return only nodes which are heads
1280 1260 or ancestors of these heads.
1281 1261
1282 1262 All the ancestors of base are in self and in remote.
1283 1263 """
1284 1264 m = self.changelog.nodemap
1285 1265 search = []
1286 1266 fetch = {}
1287 1267 seen = {}
1288 1268 seenbranch = {}
1289 1269 if base == None:
1290 1270 base = {}
1291 1271
1292 1272 if not heads:
1293 1273 heads = remote.heads()
1294 1274
1295 1275 if self.changelog.tip() == nullid:
1296 1276 base[nullid] = 1
1297 1277 if heads != [nullid]:
1298 1278 return [nullid], [nullid], list(heads)
1299 1279 return [nullid], [], []
1300 1280
1301 1281 # assume we're closer to the tip than the root
1302 1282 # and start by examining the heads
1303 1283 self.ui.status(_("searching for changes\n"))
1304 1284
1305 1285 unknown = []
1306 1286 for h in heads:
1307 1287 if h not in m:
1308 1288 unknown.append(h)
1309 1289 else:
1310 1290 base[h] = 1
1311 1291
1312 1292 heads = unknown
1313 1293 if not unknown:
1314 1294 return base.keys(), [], []
1315 1295
1316 1296 req = dict.fromkeys(unknown)
1317 1297 reqcnt = 0
1318 1298
1319 1299 # search through remote branches
1320 1300 # a 'branch' here is a linear segment of history, with four parts:
1321 1301 # head, root, first parent, second parent
1322 1302 # (a branch always has two parents (or none) by definition)
1323 1303 unknown = remote.branches(unknown)
1324 1304 while unknown:
1325 1305 r = []
1326 1306 while unknown:
1327 1307 n = unknown.pop(0)
1328 1308 if n[0] in seen:
1329 1309 continue
1330 1310
1331 1311 self.ui.debug(_("examining %s:%s\n")
1332 1312 % (short(n[0]), short(n[1])))
1333 1313 if n[0] == nullid: # found the end of the branch
1334 1314 pass
1335 1315 elif n in seenbranch:
1336 1316 self.ui.debug(_("branch already found\n"))
1337 1317 continue
1338 1318 elif n[1] and n[1] in m: # do we know the base?
1339 1319 self.ui.debug(_("found incomplete branch %s:%s\n")
1340 1320 % (short(n[0]), short(n[1])))
1341 1321 search.append(n[0:2]) # schedule branch range for scanning
1342 1322 seenbranch[n] = 1
1343 1323 else:
1344 1324 if n[1] not in seen and n[1] not in fetch:
1345 1325 if n[2] in m and n[3] in m:
1346 1326 self.ui.debug(_("found new changeset %s\n") %
1347 1327 short(n[1]))
1348 1328 fetch[n[1]] = 1 # earliest unknown
1349 1329 for p in n[2:4]:
1350 1330 if p in m:
1351 1331 base[p] = 1 # latest known
1352 1332
1353 1333 for p in n[2:4]:
1354 1334 if p not in req and p not in m:
1355 1335 r.append(p)
1356 1336 req[p] = 1
1357 1337 seen[n[0]] = 1
1358 1338
1359 1339 if r:
1360 1340 reqcnt += 1
1361 1341 self.ui.debug(_("request %d: %s\n") %
1362 1342 (reqcnt, " ".join(map(short, r))))
1363 1343 for p in xrange(0, len(r), 10):
1364 1344 for b in remote.branches(r[p:p+10]):
1365 1345 self.ui.debug(_("received %s:%s\n") %
1366 1346 (short(b[0]), short(b[1])))
1367 1347 unknown.append(b)
1368 1348
1369 1349 # do binary search on the branches we found
1370 1350 while search:
1371 1351 newsearch = []
1372 1352 reqcnt += 1
1373 1353 for n, l in zip(search, remote.between(search)):
1374 1354 l.append(n[1])
1375 1355 p = n[0]
1376 1356 f = 1
1377 1357 for i in l:
1378 1358 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1379 1359 if i in m:
1380 1360 if f <= 2:
1381 1361 self.ui.debug(_("found new branch changeset %s\n") %
1382 1362 short(p))
1383 1363 fetch[p] = 1
1384 1364 base[i] = 1
1385 1365 else:
1386 1366 self.ui.debug(_("narrowed branch search to %s:%s\n")
1387 1367 % (short(p), short(i)))
1388 1368 newsearch.append((p, i))
1389 1369 break
1390 1370 p, f = i, f * 2
1391 1371 search = newsearch
1392 1372
1393 1373 # sanity check our fetch list
1394 1374 for f in fetch.keys():
1395 1375 if f in m:
1396 1376 raise error.RepoError(_("already have changeset ")
1397 1377 + short(f[:4]))
1398 1378
1399 1379 if base.keys() == [nullid]:
1400 1380 if force:
1401 1381 self.ui.warn(_("warning: repository is unrelated\n"))
1402 1382 else:
1403 1383 raise util.Abort(_("repository is unrelated"))
1404 1384
1405 1385 self.ui.debug(_("found new changesets starting at ") +
1406 1386 " ".join([short(f) for f in fetch]) + "\n")
1407 1387
1408 1388 self.ui.debug(_("%d total queries\n") % reqcnt)
1409 1389
1410 1390 return base.keys(), fetch.keys(), heads
1411 1391
1412 1392 def findoutgoing(self, remote, base=None, heads=None, force=False):
1413 1393 """Return list of nodes that are roots of subsets not in remote
1414 1394
1415 1395 If base dict is specified, assume that these nodes and their parents
1416 1396 exist on the remote side.
1417 1397 If a list of heads is specified, return only nodes which are heads
1418 1398 or ancestors of these heads, and return a second element which
1419 1399 contains all remote heads which get new children.
1420 1400 """
1421 1401 if base == None:
1422 1402 base = {}
1423 1403 self.findincoming(remote, base, heads, force=force)
1424 1404
1425 1405 self.ui.debug(_("common changesets up to ")
1426 1406 + " ".join(map(short, base.keys())) + "\n")
1427 1407
1428 1408 remain = dict.fromkeys(self.changelog.nodemap)
1429 1409
1430 1410 # prune everything remote has from the tree
1431 1411 del remain[nullid]
1432 1412 remove = base.keys()
1433 1413 while remove:
1434 1414 n = remove.pop(0)
1435 1415 if n in remain:
1436 1416 del remain[n]
1437 1417 for p in self.changelog.parents(n):
1438 1418 remove.append(p)
1439 1419
1440 1420 # find every node whose parents have been pruned
1441 1421 subset = []
1442 1422 # find every remote head that will get new children
1443 1423 updated_heads = {}
1444 1424 for n in remain:
1445 1425 p1, p2 = self.changelog.parents(n)
1446 1426 if p1 not in remain and p2 not in remain:
1447 1427 subset.append(n)
1448 1428 if heads:
1449 1429 if p1 in heads:
1450 1430 updated_heads[p1] = True
1451 1431 if p2 in heads:
1452 1432 updated_heads[p2] = True
1453 1433
1454 1434 # this is the set of all roots we have to push
1455 1435 if heads:
1456 1436 return subset, updated_heads.keys()
1457 1437 else:
1458 1438 return subset
1459 1439
1460 1440 def pull(self, remote, heads=None, force=False):
1461 1441 lock = self.lock()
1462 1442 try:
1463 1443 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1464 1444 force=force)
1465 1445 if fetch == [nullid]:
1466 1446 self.ui.status(_("requesting all changes\n"))
1467 1447
1468 1448 if not fetch:
1469 1449 self.ui.status(_("no changes found\n"))
1470 1450 return 0
1471 1451
1472 1452 if heads is None and remote.capable('changegroupsubset'):
1473 1453 heads = rheads
1474 1454
1475 1455 if heads is None:
1476 1456 cg = remote.changegroup(fetch, 'pull')
1477 1457 else:
1478 1458 if not remote.capable('changegroupsubset'):
1479 1459 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1480 1460 cg = remote.changegroupsubset(fetch, heads, 'pull')
1481 1461 return self.addchangegroup(cg, 'pull', remote.url())
1482 1462 finally:
1483 1463 del lock
1484 1464
1485 1465 def push(self, remote, force=False, revs=None):
1486 1466 # there are two ways to push to remote repo:
1487 1467 #
1488 1468 # addchangegroup assumes local user can lock remote
1489 1469 # repo (local filesystem, old ssh servers).
1490 1470 #
1491 1471 # unbundle assumes local user cannot lock remote repo (new ssh
1492 1472 # servers, http servers).
1493 1473
1494 1474 if remote.capable('unbundle'):
1495 1475 return self.push_unbundle(remote, force, revs)
1496 1476 return self.push_addchangegroup(remote, force, revs)
1497 1477
1498 1478 def prepush(self, remote, force, revs):
1499 1479 common = {}
1500 1480 remote_heads = remote.heads()
1501 1481 inc = self.findincoming(remote, common, remote_heads, force=force)
1502 1482
1503 1483 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1504 1484 if revs is not None:
1505 1485 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1506 1486 else:
1507 1487 bases, heads = update, self.changelog.heads()
1508 1488
1509 1489 if not bases:
1510 1490 self.ui.status(_("no changes found\n"))
1511 1491 return None, 1
1512 1492 elif not force:
1513 1493 # check if we're creating new remote heads
1514 1494 # to be a remote head after push, node must be either
1515 1495 # - unknown locally
1516 1496 # - a local outgoing head descended from update
1517 1497 # - a remote head that's known locally and not
1518 1498 # ancestral to an outgoing head
1519 1499
1520 1500 warn = 0
1521 1501
1522 1502 if remote_heads == [nullid]:
1523 1503 warn = 0
1524 1504 elif not revs and len(heads) > len(remote_heads):
1525 1505 warn = 1
1526 1506 else:
1527 1507 newheads = list(heads)
1528 1508 for r in remote_heads:
1529 1509 if r in self.changelog.nodemap:
1530 1510 desc = self.changelog.heads(r, heads)
1531 1511 l = [h for h in heads if h in desc]
1532 1512 if not l:
1533 1513 newheads.append(r)
1534 1514 else:
1535 1515 newheads.append(r)
1536 1516 if len(newheads) > len(remote_heads):
1537 1517 warn = 1
1538 1518
1539 1519 if warn:
1540 1520 self.ui.warn(_("abort: push creates new remote heads!\n"))
1541 1521 self.ui.status(_("(did you forget to merge?"
1542 1522 " use push -f to force)\n"))
1543 1523 return None, 0
1544 1524 elif inc:
1545 1525 self.ui.warn(_("note: unsynced remote changes!\n"))
1546 1526
1547 1527
1548 1528 if revs is None:
1549 1529 # use the fast path, no race possible on push
1550 1530 cg = self._changegroup(common.keys(), 'push')
1551 1531 else:
1552 1532 cg = self.changegroupsubset(update, revs, 'push')
1553 1533 return cg, remote_heads
1554 1534
1555 1535 def push_addchangegroup(self, remote, force, revs):
1556 1536 lock = remote.lock()
1557 1537 try:
1558 1538 ret = self.prepush(remote, force, revs)
1559 1539 if ret[0] is not None:
1560 1540 cg, remote_heads = ret
1561 1541 return remote.addchangegroup(cg, 'push', self.url())
1562 1542 return ret[1]
1563 1543 finally:
1564 1544 del lock
1565 1545
1566 1546 def push_unbundle(self, remote, force, revs):
1567 1547 # local repo finds heads on server, finds out what revs it
1568 1548 # must push. once revs transferred, if server finds it has
1569 1549 # different heads (someone else won commit/push race), server
1570 1550 # aborts.
1571 1551
1572 1552 ret = self.prepush(remote, force, revs)
1573 1553 if ret[0] is not None:
1574 1554 cg, remote_heads = ret
1575 1555 if force: remote_heads = ['force']
1576 1556 return remote.unbundle(cg, remote_heads, 'push')
1577 1557 return ret[1]
1578 1558
1579 1559 def changegroupinfo(self, nodes, source):
1580 1560 if self.ui.verbose or source == 'bundle':
1581 1561 self.ui.status(_("%d changesets found\n") % len(nodes))
1582 1562 if self.ui.debugflag:
1583 1563 self.ui.debug(_("list of changesets:\n"))
1584 1564 for node in nodes:
1585 1565 self.ui.debug("%s\n" % hex(node))
1586 1566
1587 1567 def changegroupsubset(self, bases, heads, source, extranodes=None):
1588 1568 """This function generates a changegroup consisting of all the nodes
1589 1569 that are descendants of any of the bases, and ancestors of any of
1590 1570 the heads.
1591 1571
1592 1572 It is fairly complex as determining which filenodes and which
1593 1573 manifest nodes need to be included for the changeset to be complete
1594 1574 is non-trivial.
1595 1575
1596 1576 Another wrinkle is doing the reverse, figuring out which changeset in
1597 1577 the changegroup a particular filenode or manifestnode belongs to.
1598 1578
1599 1579 The caller can specify some nodes that must be included in the
1600 1580 changegroup using the extranodes argument. It should be a dict
1601 1581 where the keys are the filenames (or 1 for the manifest), and the
1602 1582 values are lists of (node, linknode) tuples, where node is a wanted
1603 1583 node and linknode is the changelog node that should be transmitted as
1604 1584 the linkrev.
1605 1585 """
1606 1586
1607 1587 if extranodes is None:
1608 1588 # can we go through the fast path ?
1609 1589 heads.sort()
1610 1590 allheads = self.heads()
1611 1591 allheads.sort()
1612 1592 if heads == allheads:
1613 1593 common = []
1614 1594 # parents of bases are known from both sides
1615 1595 for n in bases:
1616 1596 for p in self.changelog.parents(n):
1617 1597 if p != nullid:
1618 1598 common.append(p)
1619 1599 return self._changegroup(common, source)
1620 1600
1621 1601 self.hook('preoutgoing', throw=True, source=source)
1622 1602
1623 1603 # Set up some initial variables
1624 1604 # Make it easy to refer to self.changelog
1625 1605 cl = self.changelog
1626 1606 # msng is short for missing - compute the list of changesets in this
1627 1607 # changegroup.
1628 1608 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1629 1609 self.changegroupinfo(msng_cl_lst, source)
1630 1610 # Some bases may turn out to be superfluous, and some heads may be
1631 1611 # too. nodesbetween will return the minimal set of bases and heads
1632 1612 # necessary to re-create the changegroup.
1633 1613
1634 1614 # Known heads are the list of heads that it is assumed the recipient
1635 1615 # of this changegroup will know about.
1636 1616 knownheads = {}
1637 1617 # We assume that all parents of bases are known heads.
1638 1618 for n in bases:
1639 1619 for p in cl.parents(n):
1640 1620 if p != nullid:
1641 1621 knownheads[p] = 1
1642 1622 knownheads = knownheads.keys()
1643 1623 if knownheads:
1644 1624 # Now that we know what heads are known, we can compute which
1645 1625 # changesets are known. The recipient must know about all
1646 1626 # changesets required to reach the known heads from the null
1647 1627 # changeset.
1648 1628 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1649 1629 junk = None
1650 1630 # Transform the list into an ersatz set.
1651 1631 has_cl_set = dict.fromkeys(has_cl_set)
1652 1632 else:
1653 1633 # If there were no known heads, the recipient cannot be assumed to
1654 1634 # know about any changesets.
1655 1635 has_cl_set = {}
1656 1636
1657 1637 # Make it easy to refer to self.manifest
1658 1638 mnfst = self.manifest
1659 1639 # We don't know which manifests are missing yet
1660 1640 msng_mnfst_set = {}
1661 1641 # Nor do we know which filenodes are missing.
1662 1642 msng_filenode_set = {}
1663 1643
1664 1644 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1665 1645 junk = None
1666 1646
1667 1647 # A changeset always belongs to itself, so the changenode lookup
1668 1648 # function for a changenode is identity.
1669 1649 def identity(x):
1670 1650 return x
1671 1651
1672 1652 # A function generating function. Sets up an environment for the
1673 1653 # inner function.
1674 1654 def cmp_by_rev_func(revlog):
1675 1655 # Compare two nodes by their revision number in the environment's
1676 1656 # revision history. Since the revision number both represents the
1677 1657 # most efficient order to read the nodes in, and represents a
1678 1658 # topological sorting of the nodes, this function is often useful.
1679 1659 def cmp_by_rev(a, b):
1680 1660 return cmp(revlog.rev(a), revlog.rev(b))
1681 1661 return cmp_by_rev
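        # For example, haslst.sort(cmp_by_rev_func(revlog)) below orders
        # the nodes oldest-first, which is both the cheapest order to
        # read them in and a valid topological order.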
1682 1662
1683 1663 # If we determine that a particular file or manifest node must be a
1684 1664 # node that the recipient of the changegroup will already have, we can
1685 1665 # also assume the recipient will have all the parents. This function
1686 1666 # prunes them from the set of missing nodes.
1687 1667 def prune_parents(revlog, hasset, msngset):
1688 1668 haslst = hasset.keys()
1689 1669 haslst.sort(cmp_by_rev_func(revlog))
1690 1670 for node in haslst:
1691 1671 parentlst = [p for p in revlog.parents(node) if p != nullid]
1692 1672 while parentlst:
1693 1673 n = parentlst.pop()
1694 1674 if n not in hasset:
1695 1675 hasset[n] = 1
1696 1676 p = [p for p in revlog.parents(n) if p != nullid]
1697 1677 parentlst.extend(p)
1698 1678 for n in hasset:
1699 1679 msngset.pop(n, None)
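        # E.g. once one node is known to be present on the recipient,
        # all of its ancestors are marked present too and dropped from
        # msngset.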
1700 1680
1701 1681 # This is a function generating function used to set up an environment
1702 1682 # for the inner function to execute in.
1703 1683 def manifest_and_file_collector(changedfileset):
1704 1684 # This is an information gathering function that gathers
1705 1685 # information from each changeset node that goes out as part of
1706 1686 # the changegroup. The information gathered is a list of which
1707 1687 # manifest nodes are potentially required (the recipient may
1708 1688 # already have them) and total list of all files which were
1709 1689 # changed in any changeset in the changegroup.
1710 1690 #
1711 1691 # We also remember the first changenode each manifest was
1712 1692 # referenced by, so we can later determine which changenode
1713 1693 # 'owns' the manifest.
1714 1694 def collect_manifests_and_files(clnode):
1715 1695 c = cl.read(clnode)
1716 1696 for f in c[3]:
1717 1697 # This is to make sure we only have one instance of each
1718 1698 # filename string for each filename.
1719 1699 changedfileset.setdefault(f, f)
1720 1700 msng_mnfst_set.setdefault(c[0], clnode)
1721 1701 return collect_manifests_and_files
1722 1702
1723 1703 # Figure out which manifest nodes (of the ones we think might be part
1724 1704 # of the changegroup) the recipient must know about and remove them
1725 1705 # from the changegroup.
1726 1706 def prune_manifests():
1727 1707 has_mnfst_set = {}
1728 1708 for n in msng_mnfst_set:
1729 1709 # If a 'missing' manifest thinks it belongs to a changenode
1730 1710 # the recipient is assumed to have, obviously the recipient
1731 1711 # must have that manifest.
1732 1712 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1733 1713 if linknode in has_cl_set:
1734 1714 has_mnfst_set[n] = 1
1735 1715 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1736 1716
1737 1717 # Use the information collected in collect_manifests_and_files to say
1738 1718 # which changenode any manifestnode belongs to.
1739 1719 def lookup_manifest_link(mnfstnode):
1740 1720 return msng_mnfst_set[mnfstnode]
1741 1721
1742 1722 # A function generating function that sets up the initial environment
1743 1723 # for the inner function.
1744 1724 def filenode_collector(changedfiles):
1745 1725 next_rev = [0]
1746 1726 # This gathers information from each manifestnode included in the
1747 1727 # changegroup about which filenodes the manifest node references
1748 1728 # so we can include those in the changegroup too.
1749 1729 #
1750 1730 # It also remembers which changenode each filenode belongs to. It
1751 1731 # does this by assuming that a filenode belongs to the changenode
1752 1732 # that the first manifest referencing it belongs to.
1753 1733 def collect_msng_filenodes(mnfstnode):
1754 1734 r = mnfst.rev(mnfstnode)
1755 1735 if r == next_rev[0]:
1756 1736 # If the last rev we looked at was the one just previous,
1757 1737 # we only need to see a diff.
1758 1738 deltamf = mnfst.readdelta(mnfstnode)
1759 1739 # For each line in the delta
1760 1740 for f, fnode in deltamf.iteritems():
1761 1741 f = changedfiles.get(f, None)
1762 1742 # And if the file is in the list of files we care
1763 1743 # about.
1764 1744 if f is not None:
1765 1745 # Get the changenode this manifest belongs to
1766 1746 clnode = msng_mnfst_set[mnfstnode]
1767 1747 # Create the set of filenodes for the file if
1768 1748 # there isn't one already.
1769 1749 ndset = msng_filenode_set.setdefault(f, {})
1770 1750 # And set the filenode's changelog node to the
1771 1751 # manifest's if it hasn't been set already.
1772 1752 ndset.setdefault(fnode, clnode)
1773 1753 else:
1774 1754 # Otherwise we need a full manifest.
1775 1755 m = mnfst.read(mnfstnode)
1776 1756 # For every file we care about.
1777 1757 for f in changedfiles:
1778 1758 fnode = m.get(f, None)
1779 1759 # If it's in the manifest
1780 1760 if fnode is not None:
1781 1761 # See comments above.
1782 1762 clnode = msng_mnfst_set[mnfstnode]
1783 1763 ndset = msng_filenode_set.setdefault(f, {})
1784 1764 ndset.setdefault(fnode, clnode)
1785 1765 # Remember the revision we hope to see next.
1786 1766 next_rev[0] = r + 1
1787 1767 return collect_msng_filenodes
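        # Note: the caller sorts the manifest nodes by revision (see
        # msng_mnfst_lst.sort below), so after the first full read each
        # consecutive revision takes the cheap readdelta() path.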
1788 1768
1789 1769 # We have a list of filenodes we think we need for a file; let's
1790 1770 # remove all those we know the recipient must have.
1791 1771 def prune_filenodes(f, filerevlog):
1792 1772 msngset = msng_filenode_set[f]
1793 1773 hasset = {}
1794 1774 # If a 'missing' filenode thinks it belongs to a changenode we
1795 1775 # assume the recipient must have, then the recipient must have
1796 1776 # that filenode.
1797 1777 for n in msngset:
1798 1778 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1799 1779 if clnode in has_cl_set:
1800 1780 hasset[n] = 1
1801 1781 prune_parents(filerevlog, hasset, msngset)
1802 1782
1803 1783 # A function generating function that sets up a context for the
1804 1784 # inner function.
1805 1785 def lookup_filenode_link_func(fname):
1806 1786 msngset = msng_filenode_set[fname]
1807 1787 # Lookup the changenode the filenode belongs to.
1808 1788 def lookup_filenode_link(fnode):
1809 1789 return msngset[fnode]
1810 1790 return lookup_filenode_link
1811 1791
1812 1792 # Add the nodes that were explicitly requested.
1813 1793 def add_extra_nodes(name, nodes):
1814 1794 if not extranodes or name not in extranodes:
1815 1795 return
1816 1796
1817 1797 for node, linknode in extranodes[name]:
1818 1798 if node not in nodes:
1819 1799 nodes[node] = linknode
1820 1800
1821 1801 # Now that we have all these utility functions to help out and
1822 1802 # logically divide up the task, generate the group.
1823 1803 def gengroup():
1824 1804 # The set of changed files starts empty.
1825 1805 changedfiles = {}
1826 1806 # Create a changenode group generator that will call our functions
1827 1807 # back to lookup the owning changenode and collect information.
1828 1808 group = cl.group(msng_cl_lst, identity,
1829 1809 manifest_and_file_collector(changedfiles))
1830 1810 for chnk in group:
1831 1811 yield chnk
1832 1812
1833 1813 # The list of manifests has been collected by the generator
1834 1814 # calling our functions back.
1835 1815 prune_manifests()
1836 1816 add_extra_nodes(1, msng_mnfst_set)
1837 1817 msng_mnfst_lst = msng_mnfst_set.keys()
1838 1818 # Sort the manifestnodes by revision number.
1839 1819 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1840 1820 # Create a generator for the manifestnodes that calls our lookup
1841 1821 # and data collection functions back.
1842 1822 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1843 1823 filenode_collector(changedfiles))
1844 1824 for chnk in group:
1845 1825 yield chnk
1846 1826
1847 1827 # These are no longer needed, dereference and toss the memory for
1848 1828 # them.
1849 1829 msng_mnfst_lst = None
1850 1830 msng_mnfst_set.clear()
1851 1831
1852 1832 if extranodes:
1853 1833 for fname in extranodes:
1854 1834 if isinstance(fname, int):
1855 1835 continue
1856 1836 msng_filenode_set.setdefault(fname, {})
1857 1837 changedfiles[fname] = 1
1858 1838 # Go through all our files, sorted by name.
1859 1839 for fname in util.sort(changedfiles):
1860 1840 filerevlog = self.file(fname)
1861 1841 if not len(filerevlog):
1862 1842 raise util.Abort(_("empty or missing revlog for %s") % fname)
1863 1843 # Toss out the filenodes that the recipient isn't really
1864 1844 # missing.
1865 1845 if fname in msng_filenode_set:
1866 1846 prune_filenodes(fname, filerevlog)
1867 1847 add_extra_nodes(fname, msng_filenode_set[fname])
1868 1848 msng_filenode_lst = msng_filenode_set[fname].keys()
1869 1849 else:
1870 1850 msng_filenode_lst = []
1871 1851 # If any filenodes are left, generate the group for them,
1872 1852 # otherwise don't bother.
1873 1853 if len(msng_filenode_lst) > 0:
1874 1854 yield changegroup.chunkheader(len(fname))
1875 1855 yield fname
1876 1856 # Sort the filenodes by their revision #
1877 1857 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1878 1858 # Create a group generator and only pass in a changenode
1879 1859 # lookup function as we need to collect no information
1880 1860 # from filenodes.
1881 1861 group = filerevlog.group(msng_filenode_lst,
1882 1862 lookup_filenode_link_func(fname))
1883 1863 for chnk in group:
1884 1864 yield chnk
1885 1865 if fname in msng_filenode_set:
1886 1866 # Don't need this anymore, toss it to free memory.
1887 1867 del msng_filenode_set[fname]
1888 1868 # Signal that no more groups are left.
1889 1869 yield changegroup.closechunk()
1890 1870
1891 1871 if msng_cl_lst:
1892 1872 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1893 1873
1894 1874 return util.chunkbuffer(gengroup())
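
    # The stream returned above is a sequence of length-prefixed chunks.
    # A minimal consumer sketch, assuming the classic changegroup framing
    # (a 4-byte big-endian length that includes itself, with a length
    # <= 4 acting as the close marker written by changegroup.closechunk);
    # changegroup.chunkiter is the real reader.
    def _iter_chunks_sketch(fp):
        import struct
        while True:
            header = fp.read(4)
            if len(header) < 4:
                break  # truncated stream
            length = struct.unpack(">l", header)[0]
            if length <= 4:
                break  # closechunk: end of this group
            yield fp.read(length - 4)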
1895 1875
1896 1876 def changegroup(self, basenodes, source):
1897 1877 # to avoid a race we use changegroupsubset() (issue1320)
1898 1878 return self.changegroupsubset(basenodes, self.heads(), source)
1899 1879
1900 1880 def _changegroup(self, common, source):
1901 1881 """Generate a changegroup of all nodes that we have that a recipient
1902 1882 doesn't.
1903 1883
1904 1884 This is much easier than the previous function as we can assume that
1905 1885 the recipient has any changenode we aren't sending them.
1906 1886
1907 1887 common is the set of common nodes between remote and self"""
1908 1888
1909 1889 self.hook('preoutgoing', throw=True, source=source)
1910 1890
1911 1891 cl = self.changelog
1912 1892 nodes = cl.findmissing(common)
1913 1893 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1914 1894 self.changegroupinfo(nodes, source)
1915 1895
1916 1896 def identity(x):
1917 1897 return x
1918 1898
1919 1899 def gennodelst(log):
1920 1900 for r in log:
1921 1901 if log.linkrev(r) in revset:
1922 1902 yield log.node(r)
1923 1903
1924 1904 def changed_file_collector(changedfileset):
1925 1905 def collect_changed_files(clnode):
1926 1906 c = cl.read(clnode)
1927 1907 for fname in c[3]:
1928 1908 changedfileset[fname] = 1
1929 1909 return collect_changed_files
1930 1910
1931 1911 def lookuprevlink_func(revlog):
1932 1912 def lookuprevlink(n):
1933 1913 return cl.node(revlog.linkrev(revlog.rev(n)))
1934 1914 return lookuprevlink
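        # A revision's linkrev points back at the changeset that
        # introduced it; mapping it through cl.node yields the changenode
        # each manifest or file chunk is attributed to.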
1935 1915
1936 1916 def gengroup():
1937 1917 # construct a list of all changed files
1938 1918 changedfiles = {}
1939 1919
1940 1920 for chnk in cl.group(nodes, identity,
1941 1921 changed_file_collector(changedfiles)):
1942 1922 yield chnk
1943 1923
1944 1924 mnfst = self.manifest
1945 1925 nodeiter = gennodelst(mnfst)
1946 1926 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1947 1927 yield chnk
1948 1928
1949 1929 for fname in util.sort(changedfiles):
1950 1930 filerevlog = self.file(fname)
1951 1931 if not len(filerevlog):
1952 1932 raise util.Abort(_("empty or missing revlog for %s") % fname)
1953 1933 nodeiter = gennodelst(filerevlog)
1954 1934 nodeiter = list(nodeiter)
1955 1935 if nodeiter:
1956 1936 yield changegroup.chunkheader(len(fname))
1957 1937 yield fname
1958 1938 lookup = lookuprevlink_func(filerevlog)
1959 1939 for chnk in filerevlog.group(nodeiter, lookup):
1960 1940 yield chnk
1961 1941
1962 1942 yield changegroup.closechunk()
1963 1943
1964 1944 if nodes:
1965 1945 self.hook('outgoing', node=hex(nodes[0]), source=source)
1966 1946
1967 1947 return util.chunkbuffer(gengroup())
1968 1948
1969 1949 def addchangegroup(self, source, srctype, url, emptyok=False):
1970 1950 """add changegroup to repo.
1971 1951
1972 1952 return values:
1973 1953 - nothing changed or no source: 0
1974 1954 - more heads than before: 1+added heads (2..n)
1975 1955 - fewer heads than before: -1-removed heads (-2..-n)
1976 1956 - number of heads stays the same: 1
1977 1957 """
1978 1958 def csmap(x):
1979 1959 self.ui.debug(_("add changeset %s\n") % short(x))
1980 1960 return len(cl)
1981 1961
1982 1962 def revmap(x):
1983 1963 return cl.rev(x)
1984 1964
1985 1965 if not source:
1986 1966 return 0
1987 1967
1988 1968 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1989 1969
1990 1970 changesets = files = revisions = 0
1991 1971
1992 1972 # write changelog data to temp files so concurrent readers will not see
1993 1973 # an inconsistent view
1994 1974 cl = self.changelog
1995 1975 cl.delayupdate()
1996 1976 oldheads = len(cl.heads())
1997 1977
1998 1978 tr = self.transaction()
1999 1979 try:
2000 1980 trp = weakref.proxy(tr)
2001 1981 # pull off the changeset group
2002 1982 self.ui.status(_("adding changesets\n"))
2003 1983 cor = len(cl) - 1
2004 1984 chunkiter = changegroup.chunkiter(source)
2005 1985 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2006 1986 raise util.Abort(_("received changelog group is empty"))
2007 1987 cnr = len(cl) - 1
2008 1988 changesets = cnr - cor
2009 1989
2010 1990 # pull off the manifest group
2011 1991 self.ui.status(_("adding manifests\n"))
2012 1992 chunkiter = changegroup.chunkiter(source)
2013 1993 # no need to check for empty manifest group here:
2014 1994 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2015 1995 # no new manifest will be created and the manifest group will
2016 1996 # be empty during the pull
2017 1997 self.manifest.addgroup(chunkiter, revmap, trp)
2018 1998
2019 1999 # process the files
2020 2000 self.ui.status(_("adding file changes\n"))
2021 2001 while 1:
2022 2002 f = changegroup.getchunk(source)
2023 2003 if not f:
2024 2004 break
2025 2005 self.ui.debug(_("adding %s revisions\n") % f)
2026 2006 fl = self.file(f)
2027 2007 o = len(fl)
2028 2008 chunkiter = changegroup.chunkiter(source)
2029 2009 if fl.addgroup(chunkiter, revmap, trp) is None:
2030 2010 raise util.Abort(_("received file revlog group is empty"))
2031 2011 revisions += len(fl) - o
2032 2012 files += 1
2033 2013
2034 2014 # make changelog see real files again
2035 2015 cl.finalize(trp)
2036 2016
2037 2017 newheads = len(self.changelog.heads())
2038 2018 heads = ""
2039 2019 if oldheads and newheads != oldheads:
2040 2020 heads = _(" (%+d heads)") % (newheads - oldheads)
2041 2021
2042 2022 self.ui.status(_("added %d changesets"
2043 2023 " with %d changes to %d files%s\n")
2044 2024 % (changesets, revisions, files, heads))
2045 2025
2046 2026 if changesets > 0:
2047 2027 self.hook('pretxnchangegroup', throw=True,
2048 2028 node=hex(self.changelog.node(cor+1)), source=srctype,
2049 2029 url=url)
2050 2030
2051 2031 tr.close()
2052 2032 finally:
2053 2033 del tr
2054 2034
2055 2035 if changesets > 0:
2056 2036 # forcefully update the on-disk branch cache
2057 2037 self.ui.debug(_("updating the branch cache\n"))
2058 2038 self.branchtags()
2059 2039 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2060 2040 source=srctype, url=url)
2061 2041
2062 2042 for i in xrange(cor + 1, cnr + 1):
2063 2043 self.hook("incoming", node=hex(self.changelog.node(i)),
2064 2044 source=srctype, url=url)
2065 2045
2066 2046 # never return 0 here:
2067 2047 if newheads < oldheads:
2068 2048 return newheads - oldheads - 1
2069 2049 else:
2070 2050 return newheads - oldheads + 1
2071 2051
2072 2052
2073 2053 def stream_in(self, remote):
2074 2054 fp = remote.stream_out()
2075 2055 l = fp.readline()
2076 2056 try:
2077 2057 resp = int(l)
2078 2058 except ValueError:
2079 2059 raise error.ResponseError(
2080 2060 _('Unexpected response from remote server:'), l)
2081 2061 if resp == 1:
2082 2062 raise util.Abort(_('operation forbidden by server'))
2083 2063 elif resp == 2:
2084 2064 raise util.Abort(_('locking the remote repository failed'))
2085 2065 elif resp != 0:
2086 2066 raise util.Abort(_('the server sent an unknown error code'))
2087 2067 self.ui.status(_('streaming all changes\n'))
2088 2068 l = fp.readline()
2089 2069 try:
2090 2070 total_files, total_bytes = map(int, l.split(' ', 1))
2091 2071 except (ValueError, TypeError):
2092 2072 raise error.ResponseError(
2093 2073 _('Unexpected response from remote server:'), l)
2094 2074 self.ui.status(_('%d files to transfer, %s of data\n') %
2095 2075 (total_files, util.bytecount(total_bytes)))
2096 2076 start = time.time()
2097 2077 for i in xrange(total_files):
2098 2078 # XXX doesn't support '\n' or '\r' in filenames
2099 2079 l = fp.readline()
2100 2080 try:
2101 2081 name, size = l.split('\0', 1)
2102 2082 size = int(size)
2103 2083 except (ValueError, TypeError):
2104 2084 raise error.ResponseError(
2105 2085 _('Unexpected response from remote server:'), l)
2106 2086 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2107 2087 ofp = self.sopener(name, 'w')
2108 2088 for chunk in util.filechunkiter(fp, limit=size):
2109 2089 ofp.write(chunk)
2110 2090 ofp.close()
2111 2091 elapsed = time.time() - start
2112 2092 if elapsed <= 0:
2113 2093 elapsed = 0.001
2114 2094 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2115 2095 (util.bytecount(total_bytes), elapsed,
2116 2096 util.bytecount(total_bytes / elapsed)))
2117 2097 self.invalidate()
2118 2098 return len(self.heads()) + 1
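
    # The wire format parsed above is: one status line, one
    # "<files> <bytes>" summary line, then per file a "name\0size\n"
    # header followed by exactly size raw bytes. A hedged sketch of the
    # sending side (the function name is invented; this is not the
    # actual server implementation):
    def _send_stream_file_sketch(fp, name, data):
        # "name\0size\n" header, then the raw revlog bytes
        fp.write("%s\0%d\n" % (name, len(data)))
        fp.write(data)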
2119 2099
2120 2100 def clone(self, remote, heads=[], stream=False):
2121 2101 '''clone remote repository.
2122 2102
2123 2103 keyword arguments:
2124 2104 heads: list of revs to clone (forces use of pull)
2125 2105 stream: use streaming clone if possible'''
2126 2106
2127 2107 # now, all clients that can request uncompressed clones can
2128 2108 # read repo formats supported by all servers that can serve
2129 2109 # them.
2130 2110
2131 2111 # if revlog format changes, client will have to check version
2132 2112 # and format flags on "stream" capability, and use
2133 2113 # uncompressed only if compatible.
2134 2114
2135 2115 if stream and not heads and remote.capable('stream'):
2136 2116 return self.stream_in(remote)
2137 2117 return self.pull(remote, heads)
2138 2118
2139 2119 # used to avoid circular references so destructors work
2140 2120 def aftertrans(files):
2141 2121 renamefiles = [tuple(t) for t in files]
2142 2122 def a():
2143 2123 for src, dest in renamefiles:
2144 2124 util.rename(src, dest)
2145 2125 return a
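
# A hedged usage sketch: transaction setup registers the returned
# callable so journal files become undo files once the transaction is
# released (the exact rename list is the caller's business; the paths
# below are illustrative):
#
#   onclose = aftertrans([('.hg/store/journal', '.hg/store/undo')])
#   ...
#   onclose() # performs the renames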
2146 2126
2147 2127 def instance(ui, path, create):
2148 2128 return localrepository(ui, util.drop_scheme('file', path), create)
2149 2129
2150 2130 def islocal(path):
2151 2131 return True
@@ -1,56 +1,56
1 1 % before commit
2 2 % store can be written by the group, other files cannot
3 3 % store is setgid
4 4 00700 ./.hg/
5 5 00600 ./.hg/00changelog.i
6 6 00600 ./.hg/requires
7 7 00770 ./.hg/store/
8 8
9 9 % after commit
10 10 % working dir files can only be written by the owner
11 11 % files created in .hg can be written by the group
12 12 % (in particular, store/**, dirstate, branch cache file, undo files)
13 13 % new directories are setgid
14 14 00700 ./.hg/
15 15 00600 ./.hg/00changelog.i
16 16 00660 ./.hg/dirstate
17 17 00600 ./.hg/requires
18 18 00770 ./.hg/store/
19 19 00660 ./.hg/store/00changelog.i
20 20 00660 ./.hg/store/00manifest.i
21 21 00770 ./.hg/store/data/
22 22 00770 ./.hg/store/data/dir/
23 23 00660 ./.hg/store/data/dir/bar.i
24 24 00660 ./.hg/store/data/foo.i
25 25 00660 ./.hg/store/fncache
26 26 00660 ./.hg/store/undo
27 27 00660 ./.hg/undo.branch
28 28 00660 ./.hg/undo.dirstate
29 29 00700 ./dir/
30 30 00600 ./dir/bar
31 31 00600 ./foo
32 32
33 33 % before push
34 34 % group can write everything
35 35 00770 ../push/.hg/
36 36 00660 ../push/.hg/00changelog.i
37 37 00660 ../push/.hg/requires
38 38 00770 ../push/.hg/store/
39 39
40 40 % after push
41 41 % group can still write everything
42 42 00770 ../push/.hg/
43 43 00660 ../push/.hg/00changelog.i
44 00660 ../push/.hg/branch.cache
44 00660 ../push/.hg/branchheads.cache
45 45 00660 ../push/.hg/requires
46 46 00770 ../push/.hg/store/
47 47 00660 ../push/.hg/store/00changelog.i
48 48 00660 ../push/.hg/store/00manifest.i
49 49 00770 ../push/.hg/store/data/
50 50 00770 ../push/.hg/store/data/dir/
51 51 00660 ../push/.hg/store/data/dir/bar.i
52 52 00660 ../push/.hg/store/data/foo.i
53 53 00660 ../push/.hg/store/fncache
54 54 00660 ../push/.hg/store/undo
55 55 00660 ../push/.hg/undo.branch
56 56 00660 ../push/.hg/undo.dirstate
@@ -1,78 +1,78
1 1 #!/bin/sh
2 2
3 branches=.hg/branch.cache
3 branches=.hg/branchheads.cache
4 4 echo '[extensions]' >> $HGRCPATH
5 5 echo 'hgext.mq=' >> $HGRCPATH
6 6
7 7 show_branch_cache()
8 8 {
9 9 # force cache (re)generation
10 10 hg log -r does-not-exist 2> /dev/null
11 11 hg log -r tip --template 'tip: #rev#\n'
12 12 if [ -f $branches ]; then
13 13 sort $branches
14 14 else
15 15 echo No branch cache
16 16 fi
17 17 if [ "$1" = 1 ]; then
18 18 for b in foo bar; do
19 19 hg log -r $b --template "branch $b: "'#rev#\n'
20 20 done
21 21 fi
22 22 }
23 23
24 24 hg init a
25 25 cd a
26 26 hg qinit -c
27 27
28 28 echo '# mq patch on an empty repo'
29 29 hg qnew p1
30 30 show_branch_cache
31 31
32 32 echo > pfile
33 33 hg add pfile
34 34 hg qrefresh -m 'patch 1'
35 35 show_branch_cache
36 36
37 37 echo
38 38 echo '# some regular revisions'
39 39 hg qpop
40 40 echo foo > foo
41 41 hg add foo
42 42 echo foo > .hg/branch
43 43 hg ci -m 'branch foo' -d '1000000 0'
44 44
45 45 echo bar > bar
46 46 hg add bar
47 47 echo bar > .hg/branch
48 48 hg ci -m 'branch bar' -d '1000000 0'
49 49 show_branch_cache
50 50
51 51 echo
52 52 echo '# add some mq patches'
53 53 hg qpush
54 54 show_branch_cache
55 55
56 56 hg qnew p2
57 57 echo foo > .hg/branch
58 58 echo foo2 >> foo
59 59 hg qrefresh -m 'patch 2'
60 60 show_branch_cache 1
61 61
62 62 echo
63 63 echo '# removing the cache'
64 64 rm $branches
65 65 show_branch_cache 1
66 66
67 67 echo
68 68 echo '# importing rev 1 (the cache now ends in one of the patches)'
69 69 hg qimport -r 1 -n p0
70 70 show_branch_cache 1
71 71 hg log -r qbase --template 'qbase: #rev#\n'
72 72
73 73 echo
74 74 echo '# detect an invalid cache'
75 75 hg qpop -a
76 76 hg qpush -a
77 77 show_branch_cache
78 78
@@ -1,110 +1,110
1 1 #!/bin/sh
2 2
3 branchcache=.hg/branch.cache
3 branchcache=.hg/branchheads.cache
4 4
5 5 hg init t
6 6 cd t
7 7 hg branches
8 8
9 9 echo foo > a
10 10 hg add a
11 11 hg ci -m "initial" -d "1000000 0"
12 12 hg branch foo
13 13 hg branch
14 14 hg ci -m "add branch name" -d "1000000 0"
15 15 hg branch bar
16 16 hg ci -m "change branch name" -d "1000000 0"
17 17 echo % branch shadowing
18 18 hg branch default
19 19 hg branch -f default
20 20 hg ci -m "clear branch name" -d "1000000 0"
21 21
22 22 hg co foo
23 23 hg branch
24 24 echo bleah > a
25 25 hg ci -m "modify a branch" -d "1000000 0"
26 26
27 27 hg merge default
28 28 hg branch
29 29 hg ci -m "merge" -d "1000000 0"
30 30 hg log
31 31
32 32 hg branches
33 33 hg branches -q
34 34
35 35 echo % test for invalid branch cache
36 36 hg rollback
37 37 cp $branchcache .hg/bc-invalid
38 38 hg log -r foo
39 39 cp .hg/bc-invalid $branchcache
40 40 hg --debug log -r foo
41 41 rm $branchcache
42 42 echo corrupted > $branchcache
43 43 hg log -qr foo
44 44 cat $branchcache
45 45
46 46 echo % push should update the branch cache
47 47 hg init ../target
48 48 echo % pushing just rev 0
49 49 hg push -qr 0 ../target
50 50 cat ../target/$branchcache
51 51 echo % pushing everything
52 52 hg push -qf ../target
53 53 cat ../target/$branchcache
54 54
55 55 echo % update with no arguments: tipmost revision of the current branch
56 56 hg up -q -C 0
57 57 hg up -q
58 58 hg id
59 59 hg up -q 1
60 60 hg up -q
61 61 hg id
62 62 hg branch foobar
63 63 hg up
64 64
65 65 echo % fastforward merge
66 66 hg branch ff
67 67 echo ff > ff
68 68 hg ci -Am'fast forward' -d '1000000 0'
69 69 hg up foo
70 70 hg merge ff
71 71 hg branch
72 72 hg commit -m'Merge ff into foo' -d '1000000 0'
73 73 hg parents
74 74 hg manifest
75 75
76 76 echo % test merging, add 3 default heads and one test head
77 77 cd ..
78 78 hg init merges
79 79 cd merges
80 80 echo a > a
81 81 hg ci -Ama
82 82
83 83 echo b > b
84 84 hg ci -Amb
85 85
86 86 hg up 0
87 87 echo c > c
88 88 hg ci -Amc
89 89
90 90 hg up 0
91 91 echo d > d
92 92 hg ci -Amd
93 93
94 94 hg up 0
95 95 hg branch test
96 96 echo e >> e
97 97 hg ci -Ame
98 98
99 99 hg log
100 100
101 101 echo % implicit merge with test branch as parent
102 102 hg merge
103 103 hg up -C default
104 104 echo % implicit merge with default branch as parent
105 105 hg merge
106 106 echo % 3 branch heads, explicit merge required
107 107 hg merge 2
108 108 hg ci -m merge
109 109 echo % 2 branch heads, implicit merge works
110 110 hg merge
@@ -1,172 +1,174
1 1 marked working directory as branch foo
2 2 foo
3 3 marked working directory as branch bar
4 4 % branch shadowing
5 5 abort: a branch of the same name already exists (use --force to override)
6 6 marked working directory as branch default
7 7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 8 foo
9 9 created new head
10 10 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 11 (branch merge, don't forget to commit)
12 12 foo
13 13 changeset: 5:5f8fb06e083e
14 14 branch: foo
15 15 tag: tip
16 16 parent: 4:4909a3732169
17 17 parent: 3:bf1bc2f45e83
18 18 user: test
19 19 date: Mon Jan 12 13:46:40 1970 +0000
20 20 summary: merge
21 21
22 22 changeset: 4:4909a3732169
23 23 branch: foo
24 24 parent: 1:b699b1cec9c2
25 25 user: test
26 26 date: Mon Jan 12 13:46:40 1970 +0000
27 27 summary: modify a branch
28 28
29 29 changeset: 3:bf1bc2f45e83
30 30 user: test
31 31 date: Mon Jan 12 13:46:40 1970 +0000
32 32 summary: clear branch name
33 33
34 34 changeset: 2:67ec16bde7f1
35 35 branch: bar
36 36 user: test
37 37 date: Mon Jan 12 13:46:40 1970 +0000
38 38 summary: change branch name
39 39
40 40 changeset: 1:b699b1cec9c2
41 41 branch: foo
42 42 user: test
43 43 date: Mon Jan 12 13:46:40 1970 +0000
44 44 summary: add branch name
45 45
46 46 changeset: 0:be8523e69bf8
47 47 user: test
48 48 date: Mon Jan 12 13:46:40 1970 +0000
49 49 summary: initial
50 50
51 51 foo 5:5f8fb06e083e
52 52 default 3:bf1bc2f45e83 (inactive)
53 53 bar 2:67ec16bde7f1 (inactive)
54 54 foo
55 55 default
56 56 bar
57 57 % test for invalid branch cache
58 58 rolling back last transaction
59 59 changeset: 4:4909a3732169
60 60 branch: foo
61 61 tag: tip
62 62 parent: 1:b699b1cec9c2
63 63 user: test
64 64 date: Mon Jan 12 13:46:40 1970 +0000
65 65 summary: modify a branch
66 66
67 67 invalidating branch cache (tip differs)
68 68 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
69 69 branch: foo
70 70 tag: tip
71 71 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
72 72 parent: -1:0000000000000000000000000000000000000000
73 73 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
74 74 user: test
75 75 date: Mon Jan 12 13:46:40 1970 +0000
76 76 files: a
77 77 extra: branch=foo
78 78 description:
79 79 modify a branch
80 80
81 81
82 82 4:4909a3732169
83 83 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
84 be8523e69bf892e25817fc97187516b3c0804ae4 default
84 85 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
85 86 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
86 87 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
87 88 % push should update the branch cache
88 89 % pushing just rev 0
89 90 be8523e69bf892e25817fc97187516b3c0804ae4 0
90 91 be8523e69bf892e25817fc97187516b3c0804ae4 default
91 92 % pushing everything
92 93 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
94 be8523e69bf892e25817fc97187516b3c0804ae4 default
93 95 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
94 96 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
95 97 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
96 98 % update with no arguments: tipmost revision of the current branch
97 99 bf1bc2f45e83
98 100 4909a3732169 (foo) tip
99 101 marked working directory as branch foobar
100 102 abort: branch foobar not found
101 103 % fastforward merge
102 104 marked working directory as branch ff
103 105 adding ff
104 106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
105 107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 108 (branch merge, don't forget to commit)
107 109 foo
108 110 changeset: 6:f0c74f92a385
109 111 branch: foo
110 112 tag: tip
111 113 parent: 4:4909a3732169
112 114 parent: 5:c420d2121b71
113 115 user: test
114 116 date: Mon Jan 12 13:46:40 1970 +0000
115 117 summary: Merge ff into foo
116 118
117 119 a
118 120 ff
119 121 % test merging, add 3 default heads and one test head
120 122 adding a
121 123 adding b
122 124 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
123 125 adding c
124 126 created new head
125 127 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
126 128 adding d
127 129 created new head
128 130 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
129 131 marked working directory as branch test
130 132 adding e
131 133 created new head
132 134 changeset: 4:3a1e01ed1df4
133 135 branch: test
134 136 tag: tip
135 137 parent: 0:cb9a9f314b8b
136 138 user: test
137 139 date: Thu Jan 01 00:00:00 1970 +0000
138 140 summary: e
139 141
140 142 changeset: 3:980f7dc84c29
141 143 parent: 0:cb9a9f314b8b
142 144 user: test
143 145 date: Thu Jan 01 00:00:00 1970 +0000
144 146 summary: d
145 147
146 148 changeset: 2:d36c0562f908
147 149 parent: 0:cb9a9f314b8b
148 150 user: test
149 151 date: Thu Jan 01 00:00:00 1970 +0000
150 152 summary: c
151 153
152 154 changeset: 1:d2ae7f538514
153 155 user: test
154 156 date: Thu Jan 01 00:00:00 1970 +0000
155 157 summary: b
156 158
157 159 changeset: 0:cb9a9f314b8b
158 160 user: test
159 161 date: Thu Jan 01 00:00:00 1970 +0000
160 162 summary: a
161 163
162 164 % implicit merge with test branch as parent
163 165 abort: branch 'test' has one head - please merge with an explicit rev
164 166 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
165 167 % implicit merge with default branch as parent
166 168 abort: branch 'default' has 3 heads - please merge with an explicit rev
167 169 % 3 branch heads, explicit merge required
168 170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 171 (branch merge, don't forget to commit)
170 172 % 2 branch heads, implicit merge works
171 173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
172 174 (branch merge, don't forget to commit)