restore branch after rollback (issue 902)
Alexandre Vassalotti
r5814:dd5a501c default
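The fix itself is small: transaction() now snapshots the current named branch into journal.branch next to journal.dirstate, and rollback() restores it through dirstate.setbranch() once the journal files have been renamed to undo.*. A minimal sketch of the behavior this buys, assuming "repo" is an open localrepository whose working directory is on the branch "stable" (hypothetical setup; only methods visible in the diff below are used):

    repo.dirstate.setbranch("stable")
    tr = repo.transaction()   # writes .hg/journal.branch = "stable"
    # ... commit work here; closing the transaction renames journal.* to undo.* ...
    tr.close()
    repo.rollback()           # reads undo.branch and calls dirstate.setbranch()
    assert repo.dirstate.branch() == "stable"   # issue 902: no longer lost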
@@ -1,2026 +1,2030 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71 self.sopener = util.encodedopener(util.opener(self.spath),
72 72 self.encodefn)
73 73
74 74 self.ui = ui.ui(parentui=parentui)
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self._tagstypecache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.filterpats = {}
86 86 self._transref = self._lockref = self._wlockref = None
87 87
88 88 def __getattr__(self, name):
89 89 if name == 'changelog':
90 90 self.changelog = changelog.changelog(self.sopener)
91 91 self.sopener.defversion = self.changelog.version
92 92 return self.changelog
93 93 if name == 'manifest':
94 94 self.changelog # load the changelog first so sopener.defversion is set
95 95 self.manifest = manifest.manifest(self.sopener)
96 96 return self.manifest
97 97 if name == 'dirstate':
98 98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 99 return self.dirstate
100 100 else:
101 101 raise AttributeError, name
102 102
103 103 def url(self):
104 104 return 'file:' + self.root
105 105
106 106 def hook(self, name, throw=False, **args):
107 107 return hook.hook(self.ui, self, name, throw, **args)
108 108
109 109 tag_disallowed = ':\r\n'
110 110
111 111 def _tag(self, name, node, message, local, user, date, parent=None,
112 112 extra={}):
113 113 use_dirstate = parent is None
114 114
115 115 for c in self.tag_disallowed:
116 116 if c in name:
117 117 raise util.Abort(_('%r cannot be used in a tag name') % c)
118 118
119 119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120 120
121 121 def writetag(fp, name, munge, prevtags):
122 122 if prevtags and prevtags[-1] != '\n':
123 123 fp.write('\n')
124 124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
125 125 fp.close()
126 126
127 127 prevtags = ''
128 128 if local:
129 129 try:
130 130 fp = self.opener('localtags', 'r+')
131 131 except IOError, err:
132 132 fp = self.opener('localtags', 'a')
133 133 else:
134 134 prevtags = fp.read()
135 135
136 136 # local tags are stored in the current charset
137 137 writetag(fp, name, None, prevtags)
138 138 self.hook('tag', node=hex(node), tag=name, local=local)
139 139 return
140 140
141 141 if use_dirstate:
142 142 try:
143 143 fp = self.wfile('.hgtags', 'rb+')
144 144 except IOError, err:
145 145 fp = self.wfile('.hgtags', 'ab')
146 146 else:
147 147 prevtags = fp.read()
148 148 else:
149 149 try:
150 150 prevtags = self.filectx('.hgtags', parent).data()
151 151 except revlog.LookupError:
152 152 pass
153 153 fp = self.wfile('.hgtags', 'wb')
154 154 if prevtags:
155 155 fp.write(prevtags)
156 156
157 157 # committed tags are stored in UTF-8
158 158 writetag(fp, name, util.fromlocal, prevtags)
159 159
160 160 if use_dirstate and '.hgtags' not in self.dirstate:
161 161 self.add(['.hgtags'])
162 162
163 163 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
164 164 extra=extra)
165 165
166 166 self.hook('tag', node=hex(node), tag=name, local=local)
167 167
168 168 return tagnode
169 169
170 170 def tag(self, name, node, message, local, user, date):
171 171 '''tag a revision with a symbolic name.
172 172
173 173 if local is True, the tag is stored in a per-repository file.
174 174 otherwise, it is stored in the .hgtags file, and a new
175 175 changeset is committed with the change.
176 176
177 177 keyword arguments:
178 178
179 179 local: whether to store tag in non-version-controlled file
180 180 (default False)
181 181
182 182 message: commit message to use if committing
183 183
184 184 user: name of user to use if committing
185 185
186 186 date: date tuple to use if committing'''
187 187
188 188 for x in self.status()[:5]:
189 189 if '.hgtags' in x:
190 190 raise util.Abort(_('working copy of .hgtags is changed '
191 191 '(please commit .hgtags manually)'))
192 192
193 193
194 194 self._tag(name, node, message, local, user, date)
195 195
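    # Editor's sketch, not part of this changeset: typical calls into the
    # tag API defined in this class. Assumes "repo" is an open
    # localrepository and "node" is a binary changeset id, e.g.
    # repo.changelog.tip().
    #
    #     repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)
    #     # -> commits an updated .hgtags; visible in repo.tags()
    #     repo.tag('wip', node, '', True, None, None)
    #     # -> written to .hg/localtags only; repo.tagtype('wip') == 'local'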
196 196 def tags(self):
197 197 '''return a mapping of tag to node'''
198 198 if self.tagscache:
199 199 return self.tagscache
200 200
201 201 globaltags = {}
202 202 tagtypes = {}
203 203
204 204 def readtags(lines, fn, tagtype):
205 205 filetags = {}
206 206 count = 0
207 207
208 208 def warn(msg):
209 209 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
210 210
211 211 for l in lines:
212 212 count += 1
213 213 if not l:
214 214 continue
215 215 s = l.split(" ", 1)
216 216 if len(s) != 2:
217 217 warn(_("cannot parse entry"))
218 218 continue
219 219 node, key = s
220 220 key = util.tolocal(key.strip()) # stored in UTF-8
221 221 try:
222 222 bin_n = bin(node)
223 223 except TypeError:
224 224 warn(_("node '%s' is not well formed") % node)
225 225 continue
226 226 if bin_n not in self.changelog.nodemap:
227 227 warn(_("tag '%s' refers to unknown node") % key)
228 228 continue
229 229
230 230 h = []
231 231 if key in filetags:
232 232 n, h = filetags[key]
233 233 h.append(n)
234 234 filetags[key] = (bin_n, h)
235 235
236 236 for k, nh in filetags.items():
237 237 if k not in globaltags:
238 238 globaltags[k] = nh
239 239 tagtypes[k] = tagtype
240 240 continue
241 241
242 242 # we prefer the global tag if:
243 243 # it supersedes us OR
244 244 # mutual supersedes and it has a higher rank
245 245 # otherwise we win because we're tip-most
246 246 an, ah = nh
247 247 bn, bh = globaltags[k]
248 248 if (bn != an and an in bh and
249 249 (bn not in ah or len(bh) > len(ah))):
250 250 an = bn
251 251 ah.extend([n for n in bh if n not in ah])
252 252 globaltags[k] = an, ah
253 253 tagtypes[k] = tagtype
254 254
255 255 # read the tags file from each head, ending with the tip
256 256 f = None
257 257 for rev, node, fnode in self._hgtagsnodes():
258 258 f = (f and f.filectx(fnode) or
259 259 self.filectx('.hgtags', fileid=fnode))
260 260 readtags(f.data().splitlines(), f, "global")
261 261
262 262 try:
263 263 data = util.fromlocal(self.opener("localtags").read())
264 264 # localtags are stored in the local character set
265 265 # while the internal tag table is stored in UTF-8
266 266 readtags(data.splitlines(), "localtags", "local")
267 267 except IOError:
268 268 pass
269 269
270 270 self.tagscache = {}
271 271 self._tagstypecache = {}
272 272 for k,nh in globaltags.items():
273 273 n = nh[0]
274 274 if n != nullid:
275 275 self.tagscache[k] = n
276 276 self._tagstypecache[k] = tagtypes[k]
277 277 self.tagscache['tip'] = self.changelog.tip()
278 278
279 279 return self.tagscache
280 280
281 281 def tagtype(self, tagname):
282 282 '''
283 283 return the type of the given tag. result can be:
284 284
285 285 'local' : a local tag
286 286 'global' : a global tag
287 287 None : tag does not exist
288 288 '''
289 289
290 290 self.tags()
291 291
292 292 return self._tagstypecache.get(tagname)
293 293
294 294 def _hgtagsnodes(self):
295 295 heads = self.heads()
296 296 heads.reverse()
297 297 last = {}
298 298 ret = []
299 299 for node in heads:
300 300 c = self.changectx(node)
301 301 rev = c.rev()
302 302 try:
303 303 fnode = c.filenode('.hgtags')
304 304 except revlog.LookupError:
305 305 continue
306 306 ret.append((rev, node, fnode))
307 307 if fnode in last:
308 308 ret[last[fnode]] = None
309 309 last[fnode] = len(ret) - 1
310 310 return [item for item in ret if item]
311 311
312 312 def tagslist(self):
313 313 '''return a list of tags ordered by revision'''
314 314 l = []
315 315 for t, n in self.tags().items():
316 316 try:
317 317 r = self.changelog.rev(n)
318 318 except:
319 319 r = -2 # sort to the beginning of the list if unknown
320 320 l.append((r, t, n))
321 321 l.sort()
322 322 return [(t, n) for r, t, n in l]
323 323
324 324 def nodetags(self, node):
325 325 '''return the tags associated with a node'''
326 326 if not self.nodetagscache:
327 327 self.nodetagscache = {}
328 328 for t, n in self.tags().items():
329 329 self.nodetagscache.setdefault(n, []).append(t)
330 330 return self.nodetagscache.get(node, [])
331 331
332 332 def _branchtags(self):
333 333 partial, last, lrev = self._readbranchcache()
334 334
335 335 tiprev = self.changelog.count() - 1
336 336 if lrev != tiprev:
337 337 self._updatebranchcache(partial, lrev+1, tiprev+1)
338 338 self._writebranchcache(partial, self.changelog.tip(), tiprev)
339 339
340 340 return partial
341 341
342 342 def branchtags(self):
343 343 if self.branchcache is not None:
344 344 return self.branchcache
345 345
346 346 self.branchcache = {} # avoid recursion in changectx
347 347 partial = self._branchtags()
348 348
349 349 # the branch cache is stored on disk as UTF-8, but in the local
350 350 # charset internally
351 351 for k, v in partial.items():
352 352 self.branchcache[util.tolocal(k)] = v
353 353 return self.branchcache
354 354
355 355 def _readbranchcache(self):
356 356 partial = {}
357 357 try:
358 358 f = self.opener("branch.cache")
359 359 lines = f.read().split('\n')
360 360 f.close()
361 361 except (IOError, OSError):
362 362 return {}, nullid, nullrev
363 363
364 364 try:
365 365 last, lrev = lines.pop(0).split(" ", 1)
366 366 last, lrev = bin(last), int(lrev)
367 367 if not (lrev < self.changelog.count() and
368 368 self.changelog.node(lrev) == last): # sanity check
369 369 # invalidate the cache
370 370 raise ValueError('Invalid branch cache: unknown tip')
371 371 for l in lines:
372 372 if not l: continue
373 373 node, label = l.split(" ", 1)
374 374 partial[label.strip()] = bin(node)
375 375 except (KeyboardInterrupt, util.SignalInterrupt):
376 376 raise
377 377 except Exception, inst:
378 378 if self.ui.debugflag:
379 379 self.ui.warn(str(inst), '\n')
380 380 partial, last, lrev = {}, nullid, nullrev
381 381 return partial, last, lrev
382 382
383 383 def _writebranchcache(self, branches, tip, tiprev):
384 384 try:
385 385 f = self.opener("branch.cache", "w", atomictemp=True)
386 386 f.write("%s %s\n" % (hex(tip), tiprev))
387 387 for label, node in branches.iteritems():
388 388 f.write("%s %s\n" % (hex(node), label))
389 389 f.rename()
390 390 except (IOError, OSError):
391 391 pass
392 392
393 393 def _updatebranchcache(self, partial, start, end):
394 394 for r in xrange(start, end):
395 395 c = self.changectx(r)
396 396 b = c.branch()
397 397 partial[b] = c.node()
398 398
399 399 def lookup(self, key):
400 400 if key == '.':
401 401 key, second = self.dirstate.parents()
402 402 if key == nullid:
403 403 raise repo.RepoError(_("no revision checked out"))
404 404 if second != nullid:
405 405 self.ui.warn(_("warning: working directory has two parents, "
406 406 "tag '.' uses the first\n"))
407 407 elif key == 'null':
408 408 return nullid
409 409 n = self.changelog._match(key)
410 410 if n:
411 411 return n
412 412 if key in self.tags():
413 413 return self.tags()[key]
414 414 if key in self.branchtags():
415 415 return self.branchtags()[key]
416 416 n = self.changelog._partialmatch(key)
417 417 if n:
418 418 return n
419 419 try:
420 420 if len(key) == 20:
421 421 key = hex(key)
422 422 except:
423 423 pass
424 424 raise repo.RepoError(_("unknown revision '%s'") % key)
425 425
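    # Editor's sketch, not part of this changeset: lookup() resolves a
    # symbolic key to a binary node, trying '.' and 'null', an exact
    # changelog match, tags, branch names, then a partial hash prefix.
    # Assumes "repo" is an open localrepository.
    #
    #     repo.lookup('tip')       # binary node of the tip changeset
    #     hex(repo.lookup('.'))    # 40-char hash of the first dirstate parent
    #     repo.lookup('default')   # tip-most head of the 'default' branch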
426 426 def dev(self):
427 427 return os.lstat(self.path).st_dev
428 428
429 429 def local(self):
430 430 return True
431 431
432 432 def join(self, f):
433 433 return os.path.join(self.path, f)
434 434
435 435 def sjoin(self, f):
436 436 f = self.encodefn(f)
437 437 return os.path.join(self.spath, f)
438 438
439 439 def wjoin(self, f):
440 440 return os.path.join(self.root, f)
441 441
442 442 def file(self, f):
443 443 if f[0] == '/':
444 444 f = f[1:]
445 445 return filelog.filelog(self.sopener, f)
446 446
447 447 def changectx(self, changeid=None):
448 448 return context.changectx(self, changeid)
449 449
450 450 def workingctx(self):
451 451 return context.workingctx(self)
452 452
453 453 def parents(self, changeid=None):
454 454 '''
455 455 get list of changectxs for parents of changeid or working directory
456 456 '''
457 457 if changeid is None:
458 458 pl = self.dirstate.parents()
459 459 else:
460 460 n = self.changelog.lookup(changeid)
461 461 pl = self.changelog.parents(n)
462 462 if pl[1] == nullid:
463 463 return [self.changectx(pl[0])]
464 464 return [self.changectx(pl[0]), self.changectx(pl[1])]
465 465
466 466 def filectx(self, path, changeid=None, fileid=None):
467 467 """changeid can be a changeset revision, node, or tag.
468 468 fileid can be a file revision or node."""
469 469 return context.filectx(self, path, changeid, fileid)
470 470
471 471 def getcwd(self):
472 472 return self.dirstate.getcwd()
473 473
474 474 def pathto(self, f, cwd=None):
475 475 return self.dirstate.pathto(f, cwd)
476 476
477 477 def wfile(self, f, mode='r'):
478 478 return self.wopener(f, mode)
479 479
480 480 def _link(self, f):
481 481 return os.path.islink(self.wjoin(f))
482 482
483 483 def _filter(self, filter, filename, data):
484 484 if filter not in self.filterpats:
485 485 l = []
486 486 for pat, cmd in self.ui.configitems(filter):
487 487 mf = util.matcher(self.root, "", [pat], [], [])[1]
488 488 l.append((mf, cmd))
489 489 self.filterpats[filter] = l
490 490
491 491 for mf, cmd in self.filterpats[filter]:
492 492 if mf(filename):
493 493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
494 494 data = util.filter(data, cmd)
495 495 break
496 496
497 497 return data
498 498
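    # Editor's sketch, not part of this changeset: "filter" above names an
    # hgrc section, so a configuration like
    #
    #     [encode]
    #     *.gz = gunzip
    #     [decode]
    #     *.gz = gzip
    #
    # makes wread() pipe *.gz files through gunzip on their way into the
    # repository and wwrite() pipe them back through gzip, via
    # util.filter(data, cmd).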
499 499 def wread(self, filename):
500 500 if self._link(filename):
501 501 data = os.readlink(self.wjoin(filename))
502 502 else:
503 503 data = self.wopener(filename, 'r').read()
504 504 return self._filter("encode", filename, data)
505 505
506 506 def wwrite(self, filename, data, flags):
507 507 data = self._filter("decode", filename, data)
508 508 try:
509 509 os.unlink(self.wjoin(filename))
510 510 except OSError:
511 511 pass
512 512 self.wopener(filename, 'w').write(data)
513 513 util.set_flags(self.wjoin(filename), flags)
514 514
515 515 def wwritedata(self, filename, data):
516 516 return self._filter("decode", filename, data)
517 517
518 518 def transaction(self):
519 519 if self._transref and self._transref():
520 520 return self._transref().nest()
521 521
522 522 # save dirstate for rollback
523 523 try:
524 524 ds = self.opener("dirstate").read()
525 525 except IOError:
526 526 ds = ""
527 527 self.opener("journal.dirstate", "w").write(ds)
528 self.opener("journal.branch", "w").write(self.dirstate.branch())
528 529
529 530 renames = [(self.sjoin("journal"), self.sjoin("undo")),
530 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
531 (self.join("journal.dirstate"), self.join("undo.dirstate")),
532 (self.join("journal.branch"), self.join("undo.branch"))]
531 533 tr = transaction.transaction(self.ui.warn, self.sopener,
532 534 self.sjoin("journal"),
533 535 aftertrans(renames))
534 536 self._transref = weakref.ref(tr)
535 537 return tr
536 538
537 539 def recover(self):
538 540 l = self.lock()
539 541 try:
540 542 if os.path.exists(self.sjoin("journal")):
541 543 self.ui.status(_("rolling back interrupted transaction\n"))
542 544 transaction.rollback(self.sopener, self.sjoin("journal"))
543 545 self.invalidate()
544 546 return True
545 547 else:
546 548 self.ui.warn(_("no interrupted transaction available\n"))
547 549 return False
548 550 finally:
549 551 del l
550 552
551 553 def rollback(self):
552 554 wlock = lock = None
553 555 try:
554 556 wlock = self.wlock()
555 557 lock = self.lock()
556 558 if os.path.exists(self.sjoin("undo")):
557 559 self.ui.status(_("rolling back last transaction\n"))
558 560 transaction.rollback(self.sopener, self.sjoin("undo"))
559 561 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
562 branch = self.opener("undo.branch").read()
563 self.dirstate.setbranch(branch)
560 564 self.invalidate()
561 565 self.dirstate.invalidate()
562 566 else:
563 567 self.ui.warn(_("no rollback information available\n"))
564 568 finally:
565 569 del lock, wlock
566 570
567 571 def invalidate(self):
568 572 for a in "changelog manifest".split():
569 573 if hasattr(self, a):
570 574 self.__delattr__(a)
571 575 self.tagscache = None
572 576 self._tagstypecache = None
573 577 self.nodetagscache = None
574 578
575 579 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
576 580 try:
577 581 l = lock.lock(lockname, 0, releasefn, desc=desc)
578 582 except lock.LockHeld, inst:
579 583 if not wait:
580 584 raise
581 585 self.ui.warn(_("waiting for lock on %s held by %r\n") %
582 586 (desc, inst.locker))
583 587 # default to 600 seconds timeout
584 588 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
585 589 releasefn, desc=desc)
586 590 if acquirefn:
587 591 acquirefn()
588 592 return l
589 593
590 594 def lock(self, wait=True):
591 595 if self._lockref and self._lockref():
592 596 return self._lockref()
593 597
594 598 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
595 599 _('repository %s') % self.origroot)
596 600 self._lockref = weakref.ref(l)
597 601 return l
598 602
599 603 def wlock(self, wait=True):
600 604 if self._wlockref and self._wlockref():
601 605 return self._wlockref()
602 606
603 607 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
604 608 self.dirstate.invalidate, _('working directory of %s') %
605 609 self.origroot)
606 610 self._wlockref = weakref.ref(l)
607 611 return l
608 612
609 613 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
610 614 """
611 615 commit an individual file as part of a larger transaction
612 616 """
613 617
614 618 t = self.wread(fn)
615 619 fl = self.file(fn)
616 620 fp1 = manifest1.get(fn, nullid)
617 621 fp2 = manifest2.get(fn, nullid)
618 622
619 623 meta = {}
620 624 cp = self.dirstate.copied(fn)
621 625 if cp:
622 626 # Mark the new revision of this file as a copy of another
623 627 # file. This copy data will effectively act as a parent
624 628 # of this new revision. If this is a merge, the first
625 629 # parent will be the nullid (meaning "look up the copy data")
626 630 # and the second one will be the other parent. For example:
627 631 #
628 632 # 0 --- 1 --- 3 rev1 changes file foo
629 633 # \ / rev2 renames foo to bar and changes it
630 634 # \- 2 -/ rev3 should have bar with all changes and
631 635 # should record that bar descends from
632 636 # bar in rev2 and foo in rev1
633 637 #
634 638 # this allows this merge to succeed:
635 639 #
636 640 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
637 641 # \ / merging rev3 and rev4 should use bar@rev2
638 642 # \- 2 --- 4 as the merge base
639 643 #
640 644 meta["copy"] = cp
641 645 if not manifest2: # not a branch merge
642 646 meta["copyrev"] = hex(manifest1.get(cp, nullid))
643 647 fp2 = nullid
644 648 elif fp2 != nullid: # copied on remote side
645 649 meta["copyrev"] = hex(manifest1.get(cp, nullid))
646 650 elif fp1 != nullid: # copied on local side, reversed
647 651 meta["copyrev"] = hex(manifest2.get(cp))
648 652 fp2 = fp1
649 653 elif cp in manifest2: # directory rename on local side
650 654 meta["copyrev"] = hex(manifest2[cp])
651 655 else: # directory rename on remote side
652 656 meta["copyrev"] = hex(manifest1.get(cp, nullid))
653 657 self.ui.debug(_(" %s: copy %s:%s\n") %
654 658 (fn, cp, meta["copyrev"]))
655 659 fp1 = nullid
656 660 elif fp2 != nullid:
657 661 # is one parent an ancestor of the other?
658 662 fpa = fl.ancestor(fp1, fp2)
659 663 if fpa == fp1:
660 664 fp1, fp2 = fp2, nullid
661 665 elif fpa == fp2:
662 666 fp2 = nullid
663 667
664 668 # is the file unmodified from the parent? report existing entry
665 669 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
666 670 return fp1
667 671
668 672 changelist.append(fn)
669 673 return fl.add(t, meta, tr, linkrev, fp1, fp2)
670 674
671 675 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
672 676 if p1 is None:
673 677 p1, p2 = self.dirstate.parents()
674 678 return self.commit(files=files, text=text, user=user, date=date,
675 679 p1=p1, p2=p2, extra=extra, empty_ok=True)
676 680
677 681 def commit(self, files=None, text="", user=None, date=None,
678 682 match=util.always, force=False, force_editor=False,
679 683 p1=None, p2=None, extra={}, empty_ok=False):
680 684 wlock = lock = tr = None
681 685 valid = 0 # don't save the dirstate if this isn't set
682 686 try:
683 687 commit = []
684 688 remove = []
685 689 changed = []
686 690 use_dirstate = (p1 is None) # not rawcommit
687 691 extra = extra.copy()
688 692
689 693 if use_dirstate:
690 694 if files:
691 695 for f in files:
692 696 s = self.dirstate[f]
693 697 if s in 'nma':
694 698 commit.append(f)
695 699 elif s == 'r':
696 700 remove.append(f)
697 701 else:
698 702 self.ui.warn(_("%s not tracked!\n") % f)
699 703 else:
700 704 changes = self.status(match=match)[:5]
701 705 modified, added, removed, deleted, unknown = changes
702 706 commit = modified + added
703 707 remove = removed
704 708 else:
705 709 commit = files
706 710
707 711 if use_dirstate:
708 712 p1, p2 = self.dirstate.parents()
709 713 update_dirstate = True
710 714 else:
711 715 p1, p2 = p1, p2 or nullid
712 716 update_dirstate = (self.dirstate.parents()[0] == p1)
713 717
714 718 c1 = self.changelog.read(p1)
715 719 c2 = self.changelog.read(p2)
716 720 m1 = self.manifest.read(c1[0]).copy()
717 721 m2 = self.manifest.read(c2[0])
718 722
719 723 if use_dirstate:
720 724 branchname = self.workingctx().branch()
721 725 try:
722 726 branchname = branchname.decode('UTF-8').encode('UTF-8')
723 727 except UnicodeDecodeError:
724 728 raise util.Abort(_('branch name not in UTF-8!'))
725 729 else:
726 730 branchname = ""
727 731
728 732 if use_dirstate:
729 733 oldname = c1[5].get("branch") # stored in UTF-8
730 734 if (not commit and not remove and not force and p2 == nullid
731 735 and branchname == oldname):
732 736 self.ui.status(_("nothing changed\n"))
733 737 return None
734 738
735 739 xp1 = hex(p1)
736 740 if p2 == nullid: xp2 = ''
737 741 else: xp2 = hex(p2)
738 742
739 743 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
740 744
741 745 wlock = self.wlock()
742 746 lock = self.lock()
743 747 tr = self.transaction()
744 748 trp = weakref.proxy(tr)
745 749
746 750 # check in files
747 751 new = {}
748 752 linkrev = self.changelog.count()
749 753 commit.sort()
750 754 is_exec = util.execfunc(self.root, m1.execf)
751 755 is_link = util.linkfunc(self.root, m1.linkf)
752 756 for f in commit:
753 757 self.ui.note(f + "\n")
754 758 try:
755 759 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
756 760 new_exec = is_exec(f)
757 761 new_link = is_link(f)
758 762 if ((not changed or changed[-1] != f) and
759 763 m2.get(f) != new[f]):
760 764 # mention the file in the changelog if some
761 765 # flag changed, even if there was no content
762 766 # change.
763 767 old_exec = m1.execf(f)
764 768 old_link = m1.linkf(f)
765 769 if old_exec != new_exec or old_link != new_link:
766 770 changed.append(f)
767 771 m1.set(f, new_exec, new_link)
768 772 if use_dirstate:
769 773 self.dirstate.normal(f)
770 774
771 775 except (OSError, IOError):
772 776 if use_dirstate:
773 777 self.ui.warn(_("trouble committing %s!\n") % f)
774 778 raise
775 779 else:
776 780 remove.append(f)
777 781
778 782 # update manifest
779 783 m1.update(new)
780 784 remove.sort()
781 785 removed = []
782 786
783 787 for f in remove:
784 788 if f in m1:
785 789 del m1[f]
786 790 removed.append(f)
787 791 elif f in m2:
788 792 removed.append(f)
789 793 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
790 794 (new, removed))
791 795
792 796 # add changeset
793 797 new = new.keys()
794 798 new.sort()
795 799
796 800 user = user or self.ui.username()
797 801 if (not empty_ok and not text) or force_editor:
798 802 edittext = []
799 803 if text:
800 804 edittext.append(text)
801 805 edittext.append("")
802 806 edittext.append(_("HG: Enter commit message."
803 807 " Lines beginning with 'HG:' are removed."))
804 808 edittext.append("HG: --")
805 809 edittext.append("HG: user: %s" % user)
806 810 if p2 != nullid:
807 811 edittext.append("HG: branch merge")
808 812 if branchname:
809 813 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
810 814 edittext.extend(["HG: changed %s" % f for f in changed])
811 815 edittext.extend(["HG: removed %s" % f for f in removed])
812 816 if not changed and not remove:
813 817 edittext.append("HG: no files changed")
814 818 edittext.append("")
815 819 # run editor in the repository root
816 820 olddir = os.getcwd()
817 821 os.chdir(self.root)
818 822 text = self.ui.edit("\n".join(edittext), user)
819 823 os.chdir(olddir)
820 824
821 825 if branchname:
822 826 extra["branch"] = branchname
823 827
824 828 if use_dirstate:
825 829 lines = [line.rstrip() for line in text.rstrip().splitlines()]
826 830 while lines and not lines[0]:
827 831 del lines[0]
828 832 if not lines:
829 833 raise util.Abort(_("empty commit message"))
830 834 text = '\n'.join(lines)
831 835
832 836 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
833 837 user, date, extra)
834 838 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
835 839 parent2=xp2)
836 840 tr.close()
837 841
838 842 if self.branchcache and "branch" in extra:
839 843 self.branchcache[util.tolocal(extra["branch"])] = n
840 844
841 845 if use_dirstate or update_dirstate:
842 846 self.dirstate.setparents(n)
843 847 if use_dirstate:
844 848 for f in removed:
845 849 self.dirstate.forget(f)
846 850 valid = 1 # our dirstate updates are complete
847 851
848 852 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
849 853 return n
850 854 finally:
851 855 if not valid: # don't save our updated dirstate
852 856 self.dirstate.invalidate()
853 857 del tr, lock, wlock
854 858
855 859 def walk(self, node=None, files=[], match=util.always, badmatch=None):
856 860 '''
857 861 walk recursively through the directory tree or a given
858 862 changeset, finding all files matched by the match
859 863 function
860 864
861 865 results are yielded in a tuple (src, filename), where src
862 866 is one of:
863 867 'f' the file was found in the directory tree
864 868 'm' the file was only in the dirstate and not in the tree
865 869 'b' file was not found and matched badmatch
866 870 '''
867 871
868 872 if node:
869 873 fdict = dict.fromkeys(files)
870 874 # for dirstate.walk, files=['.'] means "walk the whole tree".
871 875 # follow that here, too
872 876 fdict.pop('.', None)
873 877 mdict = self.manifest.read(self.changelog.read(node)[0])
874 878 mfiles = mdict.keys()
875 879 mfiles.sort()
876 880 for fn in mfiles:
877 881 for ffn in fdict:
878 882 # match if the file is the exact name or a directory
879 883 if ffn == fn or fn.startswith("%s/" % ffn):
880 884 del fdict[ffn]
881 885 break
882 886 if match(fn):
883 887 yield 'm', fn
884 888 ffiles = fdict.keys()
885 889 ffiles.sort()
886 890 for fn in ffiles:
887 891 if badmatch and badmatch(fn):
888 892 if match(fn):
889 893 yield 'b', fn
890 894 else:
891 895 self.ui.warn(_('%s: No such file in rev %s\n')
892 896 % (self.pathto(fn), short(node)))
893 897 else:
894 898 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
895 899 yield src, fn
896 900
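    # Editor's sketch, not part of this changeset: iterating the working
    # directory with the defaults; src is 'f', 'm' or 'b' as described in
    # the docstring above. Assumes "repo" is an open localrepository.
    #
    #     for src, fn in repo.walk():
    #         if src == 'f':
    #             print fn      # file found in the directory tree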
897 901 def status(self, node1=None, node2=None, files=[], match=util.always,
898 902 list_ignored=False, list_clean=False):
899 903 """return status of files between two nodes or node and working directory
900 904
901 905 If node1 is None, use the first dirstate parent instead.
902 906 If node2 is None, compare node1 with working directory.
903 907 """
904 908
905 909 def fcmp(fn, getnode):
906 910 t1 = self.wread(fn)
907 911 return self.file(fn).cmp(getnode(fn), t1)
908 912
909 913 def mfmatches(node):
910 914 change = self.changelog.read(node)
911 915 mf = self.manifest.read(change[0]).copy()
912 916 for fn in mf.keys():
913 917 if not match(fn):
914 918 del mf[fn]
915 919 return mf
916 920
917 921 modified, added, removed, deleted, unknown = [], [], [], [], []
918 922 ignored, clean = [], []
919 923
920 924 compareworking = False
921 925 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
922 926 compareworking = True
923 927
924 928 if not compareworking:
925 929 # read the manifest from node1 before the manifest from node2,
926 930 # so that we'll hit the manifest cache if we're going through
927 931 # all the revisions in parent->child order.
928 932 mf1 = mfmatches(node1)
929 933
930 934 # are we comparing the working directory?
931 935 if not node2:
932 936 (lookup, modified, added, removed, deleted, unknown,
933 937 ignored, clean) = self.dirstate.status(files, match,
934 938 list_ignored, list_clean)
935 939
936 940 # are we comparing working dir against its parent?
937 941 if compareworking:
938 942 if lookup:
939 943 fixup = []
940 944 # do a full compare of any files that might have changed
941 945 ctx = self.changectx()
942 946 for f in lookup:
943 947 if f not in ctx or ctx[f].cmp(self.wread(f)):
944 948 modified.append(f)
945 949 else:
946 950 fixup.append(f)
947 951 if list_clean:
948 952 clean.append(f)
949 953
950 954 # update dirstate for files that are actually clean
951 955 if fixup:
952 956 wlock = None
953 957 try:
954 958 try:
955 959 wlock = self.wlock(False)
956 960 except lock.LockException:
957 961 pass
958 962 if wlock:
959 963 for f in fixup:
960 964 self.dirstate.normal(f)
961 965 finally:
962 966 del wlock
963 967 else:
964 968 # we are comparing working dir against non-parent
965 969 # generate a pseudo-manifest for the working dir
966 970 # XXX: create it in dirstate.py ?
967 971 mf2 = mfmatches(self.dirstate.parents()[0])
968 972 is_exec = util.execfunc(self.root, mf2.execf)
969 973 is_link = util.linkfunc(self.root, mf2.linkf)
970 974 for f in lookup + modified + added:
971 975 mf2[f] = ""
972 976 mf2.set(f, is_exec(f), is_link(f))
973 977 for f in removed:
974 978 if f in mf2:
975 979 del mf2[f]
976 980
977 981 else:
978 982 # we are comparing two revisions
979 983 mf2 = mfmatches(node2)
980 984
981 985 if not compareworking:
982 986 # flush lists from dirstate before comparing manifests
983 987 modified, added, clean = [], [], []
984 988
985 989 # make sure to sort the files so we talk to the disk in a
986 990 # reasonable order
987 991 mf2keys = mf2.keys()
988 992 mf2keys.sort()
989 993 getnode = lambda fn: mf1.get(fn, nullid)
990 994 for fn in mf2keys:
991 995 if mf1.has_key(fn):
992 996 if (mf1.flags(fn) != mf2.flags(fn) or
993 997 (mf1[fn] != mf2[fn] and
994 998 (mf2[fn] != "" or fcmp(fn, getnode)))):
995 999 modified.append(fn)
996 1000 elif list_clean:
997 1001 clean.append(fn)
998 1002 del mf1[fn]
999 1003 else:
1000 1004 added.append(fn)
1001 1005
1002 1006 removed = mf1.keys()
1003 1007
1004 1008 # sort and return results:
1005 1009 for l in modified, added, removed, deleted, unknown, ignored, clean:
1006 1010 l.sort()
1007 1011 return (modified, added, removed, deleted, unknown, ignored, clean)
1008 1012
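    # Editor's sketch, not part of this changeset: unpacking the seven
    # sorted lists returned above. With no node arguments, status()
    # compares the working directory against its first parent; ignored
    # and clean stay empty unless explicitly requested.
    #
    #     (modified, added, removed, deleted, unknown, ignored,
    #      clean) = repo.status(list_ignored=True, list_clean=True)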
1009 1013 def add(self, list):
1010 1014 wlock = self.wlock()
1011 1015 try:
1012 1016 rejected = []
1013 1017 for f in list:
1014 1018 p = self.wjoin(f)
1015 1019 try:
1016 1020 st = os.lstat(p)
1017 1021 except:
1018 1022 self.ui.warn(_("%s does not exist!\n") % f)
1019 1023 rejected.append(f)
1020 1024 continue
1021 1025 if st.st_size > 10000000:
1022 1026 self.ui.warn(_("%s: files over 10MB may cause memory and"
1023 1027 " performance problems\n"
1024 1028 "(use 'hg revert %s' to unadd the file)\n")
1025 1029 % (f, f))
1026 1030 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1027 1031 self.ui.warn(_("%s not added: only files and symlinks "
1028 1032 "supported currently\n") % f)
1029 1033 rejected.append(p)
1030 1034 elif self.dirstate[f] in 'amn':
1031 1035 self.ui.warn(_("%s already tracked!\n") % f)
1032 1036 elif self.dirstate[f] == 'r':
1033 1037 self.dirstate.normallookup(f)
1034 1038 else:
1035 1039 self.dirstate.add(f)
1036 1040 return rejected
1037 1041 finally:
1038 1042 del wlock
1039 1043
1040 1044 def forget(self, list):
1041 1045 wlock = self.wlock()
1042 1046 try:
1043 1047 for f in list:
1044 1048 if self.dirstate[f] != 'a':
1045 1049 self.ui.warn(_("%s not added!\n") % f)
1046 1050 else:
1047 1051 self.dirstate.forget(f)
1048 1052 finally:
1049 1053 del wlock
1050 1054
1051 1055 def remove(self, list, unlink=False):
1052 1056 wlock = None
1053 1057 try:
1054 1058 if unlink:
1055 1059 for f in list:
1056 1060 try:
1057 1061 util.unlink(self.wjoin(f))
1058 1062 except OSError, inst:
1059 1063 if inst.errno != errno.ENOENT:
1060 1064 raise
1061 1065 wlock = self.wlock()
1062 1066 for f in list:
1063 1067 if unlink and os.path.exists(self.wjoin(f)):
1064 1068 self.ui.warn(_("%s still exists!\n") % f)
1065 1069 elif self.dirstate[f] == 'a':
1066 1070 self.dirstate.forget(f)
1067 1071 elif f not in self.dirstate:
1068 1072 self.ui.warn(_("%s not tracked!\n") % f)
1069 1073 else:
1070 1074 self.dirstate.remove(f)
1071 1075 finally:
1072 1076 del wlock
1073 1077
1074 1078 def undelete(self, list):
1075 1079 wlock = None
1076 1080 try:
1077 1081 manifests = [self.manifest.read(self.changelog.read(p)[0])
1078 1082 for p in self.dirstate.parents() if p != nullid]
1079 1083 wlock = self.wlock()
1080 1084 for f in list:
1081 1085 if self.dirstate[f] != 'r':
1082 1086 self.ui.warn("%s not removed!\n" % f)
1083 1087 else:
1084 1088 m = f in manifests[0] and manifests[0] or manifests[1]
1085 1089 t = self.file(f).read(m[f])
1086 1090 self.wwrite(f, t, m.flags(f))
1087 1091 self.dirstate.normal(f)
1088 1092 finally:
1089 1093 del wlock
1090 1094
1091 1095 def copy(self, source, dest):
1092 1096 wlock = None
1093 1097 try:
1094 1098 p = self.wjoin(dest)
1095 1099 if not (os.path.exists(p) or os.path.islink(p)):
1096 1100 self.ui.warn(_("%s does not exist!\n") % dest)
1097 1101 elif not (os.path.isfile(p) or os.path.islink(p)):
1098 1102 self.ui.warn(_("copy failed: %s is not a file or a "
1099 1103 "symbolic link\n") % dest)
1100 1104 else:
1101 1105 wlock = self.wlock()
1102 1106 if dest not in self.dirstate:
1103 1107 self.dirstate.add(dest)
1104 1108 self.dirstate.copy(source, dest)
1105 1109 finally:
1106 1110 del wlock
1107 1111
1108 1112 def heads(self, start=None):
1109 1113 heads = self.changelog.heads(start)
1110 1114 # sort the output in rev descending order
1111 1115 heads = [(-self.changelog.rev(h), h) for h in heads]
1112 1116 heads.sort()
1113 1117 return [n for (r, n) in heads]
1114 1118
1115 1119 def branchheads(self, branch, start=None):
1116 1120 branches = self.branchtags()
1117 1121 if branch not in branches:
1118 1122 return []
1119 1123 # The basic algorithm is this:
1120 1124 #
1121 1125 # Start from the branch tip since there are no later revisions that can
1122 1126 # possibly be in this branch, and the tip is a guaranteed head.
1123 1127 #
1124 1128 # Remember the tip's parents as the first ancestors, since these by
1125 1129 # definition are not heads.
1126 1130 #
1127 1131 # Step backwards from the branch tip through all the revisions. We are
1128 1132 # guaranteed by the rules of Mercurial that we will now be visiting the
1129 1133 # nodes in reverse topological order (children before parents).
1130 1134 #
1131 1135 # If a revision is one of the ancestors of a head then we can toss it
1132 1136 # out of the ancestors set (we've already found it and won't be
1133 1137 # visiting it again) and put its parents in the ancestors set.
1134 1138 #
1135 1139 # Otherwise, if a revision is in the branch it's another head, since it
1136 1140 # wasn't in the ancestor list of an existing head. So add it to the
1137 1141 # head list, and add its parents to the ancestor list.
1138 1142 #
1139 1143 # If it is not in the branch ignore it.
1140 1144 #
1141 1145 # Once we have a list of heads, use nodesbetween to filter out all the
1142 1146 # heads that cannot be reached from startrev. There may be a more
1143 1147 # efficient way to do this as part of the previous algorithm.
1144 1148
1145 1149 set = util.set
1146 1150 heads = [self.changelog.rev(branches[branch])]
1147 1151 # Don't care if ancestors contains nullrev or not.
1148 1152 ancestors = set(self.changelog.parentrevs(heads[0]))
1149 1153 for rev in xrange(heads[0] - 1, nullrev, -1):
1150 1154 if rev in ancestors:
1151 1155 ancestors.update(self.changelog.parentrevs(rev))
1152 1156 ancestors.remove(rev)
1153 1157 elif self.changectx(rev).branch() == branch:
1154 1158 heads.append(rev)
1155 1159 ancestors.update(self.changelog.parentrevs(rev))
1156 1160 heads = [self.changelog.node(rev) for rev in heads]
1157 1161 if start is not None:
1158 1162 heads = self.changelog.nodesbetween([start], heads)[2]
1159 1163 return heads
1160 1164
1161 1165 def branches(self, nodes):
1162 1166 if not nodes:
1163 1167 nodes = [self.changelog.tip()]
1164 1168 b = []
1165 1169 for n in nodes:
1166 1170 t = n
1167 1171 while 1:
1168 1172 p = self.changelog.parents(n)
1169 1173 if p[1] != nullid or p[0] == nullid:
1170 1174 b.append((t, n, p[0], p[1]))
1171 1175 break
1172 1176 n = p[0]
1173 1177 return b
1174 1178
1175 1179 def between(self, pairs):
1176 1180 r = []
1177 1181
1178 1182 for top, bottom in pairs:
1179 1183 n, l, i = top, [], 0
1180 1184 f = 1
1181 1185
1182 1186 while n != bottom:
1183 1187 p = self.changelog.parents(n)[0]
1184 1188 if i == f:
1185 1189 l.append(n)
1186 1190 f = f * 2
1187 1191 n = p
1188 1192 i += 1
1189 1193
1190 1194 r.append(l)
1191 1195
1192 1196 return r
1193 1197
1194 1198 def findincoming(self, remote, base=None, heads=None, force=False):
1195 1199 """Return list of roots of the subsets of missing nodes from remote
1196 1200
1197 1201 If base dict is specified, assume that these nodes and their parents
1198 1202 exist on the remote side and that no child of a node of base exists
1199 1203 in both remote and self.
1200 1204 Furthermore, base will be updated to include the nodes that exist
1201 1205 in both self and remote but none of whose children do.
1202 1206 If a list of heads is specified, return only nodes which are heads
1203 1207 or ancestors of these heads.
1204 1208
1205 1209 All the ancestors of base are in self and in remote.
1206 1210 All the descendants of the list returned are missing in self.
1207 1211 (and so we know that the rest of the nodes are missing in remote, see
1208 1212 outgoing)
1209 1213 """
1210 1214 m = self.changelog.nodemap
1211 1215 search = []
1212 1216 fetch = {}
1213 1217 seen = {}
1214 1218 seenbranch = {}
1215 1219 if base == None:
1216 1220 base = {}
1217 1221
1218 1222 if not heads:
1219 1223 heads = remote.heads()
1220 1224
1221 1225 if self.changelog.tip() == nullid:
1222 1226 base[nullid] = 1
1223 1227 if heads != [nullid]:
1224 1228 return [nullid]
1225 1229 return []
1226 1230
1227 1231 # assume we're closer to the tip than the root
1228 1232 # and start by examining the heads
1229 1233 self.ui.status(_("searching for changes\n"))
1230 1234
1231 1235 unknown = []
1232 1236 for h in heads:
1233 1237 if h not in m:
1234 1238 unknown.append(h)
1235 1239 else:
1236 1240 base[h] = 1
1237 1241
1238 1242 if not unknown:
1239 1243 return []
1240 1244
1241 1245 req = dict.fromkeys(unknown)
1242 1246 reqcnt = 0
1243 1247
1244 1248 # search through remote branches
1245 1249 # a 'branch' here is a linear segment of history, with four parts:
1246 1250 # head, root, first parent, second parent
1247 1251 # (a branch always has two parents (or none) by definition)
1248 1252 unknown = remote.branches(unknown)
1249 1253 while unknown:
1250 1254 r = []
1251 1255 while unknown:
1252 1256 n = unknown.pop(0)
1253 1257 if n[0] in seen:
1254 1258 continue
1255 1259
1256 1260 self.ui.debug(_("examining %s:%s\n")
1257 1261 % (short(n[0]), short(n[1])))
1258 1262 if n[0] == nullid: # found the end of the branch
1259 1263 pass
1260 1264 elif n in seenbranch:
1261 1265 self.ui.debug(_("branch already found\n"))
1262 1266 continue
1263 1267 elif n[1] and n[1] in m: # do we know the base?
1264 1268 self.ui.debug(_("found incomplete branch %s:%s\n")
1265 1269 % (short(n[0]), short(n[1])))
1266 1270 search.append(n) # schedule branch range for scanning
1267 1271 seenbranch[n] = 1
1268 1272 else:
1269 1273 if n[1] not in seen and n[1] not in fetch:
1270 1274 if n[2] in m and n[3] in m:
1271 1275 self.ui.debug(_("found new changeset %s\n") %
1272 1276 short(n[1]))
1273 1277 fetch[n[1]] = 1 # earliest unknown
1274 1278 for p in n[2:4]:
1275 1279 if p in m:
1276 1280 base[p] = 1 # latest known
1277 1281
1278 1282 for p in n[2:4]:
1279 1283 if p not in req and p not in m:
1280 1284 r.append(p)
1281 1285 req[p] = 1
1282 1286 seen[n[0]] = 1
1283 1287
1284 1288 if r:
1285 1289 reqcnt += 1
1286 1290 self.ui.debug(_("request %d: %s\n") %
1287 1291 (reqcnt, " ".join(map(short, r))))
1288 1292 for p in xrange(0, len(r), 10):
1289 1293 for b in remote.branches(r[p:p+10]):
1290 1294 self.ui.debug(_("received %s:%s\n") %
1291 1295 (short(b[0]), short(b[1])))
1292 1296 unknown.append(b)
1293 1297
1294 1298 # do binary search on the branches we found
1295 1299 while search:
1296 1300 n = search.pop(0)
1297 1301 reqcnt += 1
1298 1302 l = remote.between([(n[0], n[1])])[0]
1299 1303 l.append(n[1])
1300 1304 p = n[0]
1301 1305 f = 1
1302 1306 for i in l:
1303 1307 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1304 1308 if i in m:
1305 1309 if f <= 2:
1306 1310 self.ui.debug(_("found new branch changeset %s\n") %
1307 1311 short(p))
1308 1312 fetch[p] = 1
1309 1313 base[i] = 1
1310 1314 else:
1311 1315 self.ui.debug(_("narrowed branch search to %s:%s\n")
1312 1316 % (short(p), short(i)))
1313 1317 search.append((p, i))
1314 1318 break
1315 1319 p, f = i, f * 2
1316 1320
1317 1321 # sanity check our fetch list
1318 1322 for f in fetch.keys():
1319 1323 if f in m:
1320 1324 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1321 1325
1322 1326 if base.keys() == [nullid]:
1323 1327 if force:
1324 1328 self.ui.warn(_("warning: repository is unrelated\n"))
1325 1329 else:
1326 1330 raise util.Abort(_("repository is unrelated"))
1327 1331
1328 1332 self.ui.debug(_("found new changesets starting at ") +
1329 1333 " ".join([short(f) for f in fetch]) + "\n")
1330 1334
1331 1335 self.ui.debug(_("%d total queries\n") % reqcnt)
1332 1336
1333 1337 return fetch.keys()
1334 1338
1335 1339 def findoutgoing(self, remote, base=None, heads=None, force=False):
1336 1340 """Return list of nodes that are roots of subsets not in remote
1337 1341
1338 1342 If base dict is specified, assume that these nodes and their parents
1339 1343 exist on the remote side.
1340 1344 If a list of heads is specified, return only nodes which are heads
1341 1345 or ancestors of these heads, and return a second element which
1342 1346 contains all remote heads which get new children.
1343 1347 """
1344 1348 if base == None:
1345 1349 base = {}
1346 1350 self.findincoming(remote, base, heads, force=force)
1347 1351
1348 1352 self.ui.debug(_("common changesets up to ")
1349 1353 + " ".join(map(short, base.keys())) + "\n")
1350 1354
1351 1355 remain = dict.fromkeys(self.changelog.nodemap)
1352 1356
1353 1357 # prune everything remote has from the tree
1354 1358 del remain[nullid]
1355 1359 remove = base.keys()
1356 1360 while remove:
1357 1361 n = remove.pop(0)
1358 1362 if n in remain:
1359 1363 del remain[n]
1360 1364 for p in self.changelog.parents(n):
1361 1365 remove.append(p)
1362 1366
1363 1367 # find every node whose parents have been pruned
1364 1368 subset = []
1365 1369 # find every remote head that will get new children
1366 1370 updated_heads = {}
1367 1371 for n in remain:
1368 1372 p1, p2 = self.changelog.parents(n)
1369 1373 if p1 not in remain and p2 not in remain:
1370 1374 subset.append(n)
1371 1375 if heads:
1372 1376 if p1 in heads:
1373 1377 updated_heads[p1] = True
1374 1378 if p2 in heads:
1375 1379 updated_heads[p2] = True
1376 1380
1377 1381 # this is the set of all roots we have to push
1378 1382 if heads:
1379 1383 return subset, updated_heads.keys()
1380 1384 else:
1381 1385 return subset
1382 1386
1383 1387 def pull(self, remote, heads=None, force=False):
1384 1388 lock = self.lock()
1385 1389 try:
1386 1390 fetch = self.findincoming(remote, heads=heads, force=force)
1387 1391 if fetch == [nullid]:
1388 1392 self.ui.status(_("requesting all changes\n"))
1389 1393
1390 1394 if not fetch:
1391 1395 self.ui.status(_("no changes found\n"))
1392 1396 return 0
1393 1397
1394 1398 if heads is None:
1395 1399 cg = remote.changegroup(fetch, 'pull')
1396 1400 else:
1397 1401 if 'changegroupsubset' not in remote.capabilities:
1398 1402 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1399 1403 cg = remote.changegroupsubset(fetch, heads, 'pull')
1400 1404 return self.addchangegroup(cg, 'pull', remote.url())
1401 1405 finally:
1402 1406 del lock
1403 1407
1404 1408 def push(self, remote, force=False, revs=None):
1405 1409 # there are two ways to push to remote repo:
1406 1410 #
1407 1411 # addchangegroup assumes local user can lock remote
1408 1412 # repo (local filesystem, old ssh servers).
1409 1413 #
1410 1414 # unbundle assumes local user cannot lock remote repo (new ssh
1411 1415 # servers, http servers).
1412 1416
1413 1417 if remote.capable('unbundle'):
1414 1418 return self.push_unbundle(remote, force, revs)
1415 1419 return self.push_addchangegroup(remote, force, revs)
1416 1420
1417 1421 def prepush(self, remote, force, revs):
1418 1422 base = {}
1419 1423 remote_heads = remote.heads()
1420 1424 inc = self.findincoming(remote, base, remote_heads, force=force)
1421 1425
1422 1426 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1423 1427 if revs is not None:
1424 1428 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1425 1429 else:
1426 1430 bases, heads = update, self.changelog.heads()
1427 1431
1428 1432 if not bases:
1429 1433 self.ui.status(_("no changes found\n"))
1430 1434 return None, 1
1431 1435 elif not force:
1432 1436 # check if we're creating new remote heads
1433 1437 # to be a remote head after push, node must be either
1434 1438 # - unknown locally
1435 1439 # - a local outgoing head descended from update
1436 1440 # - a remote head that's known locally and not
1437 1441 # ancestral to an outgoing head
1438 1442
1439 1443 warn = 0
1440 1444
1441 1445 if remote_heads == [nullid]:
1442 1446 warn = 0
1443 1447 elif not revs and len(heads) > len(remote_heads):
1444 1448 warn = 1
1445 1449 else:
1446 1450 newheads = list(heads)
1447 1451 for r in remote_heads:
1448 1452 if r in self.changelog.nodemap:
1449 1453 desc = self.changelog.heads(r, heads)
1450 1454 l = [h for h in heads if h in desc]
1451 1455 if not l:
1452 1456 newheads.append(r)
1453 1457 else:
1454 1458 newheads.append(r)
1455 1459 if len(newheads) > len(remote_heads):
1456 1460 warn = 1
1457 1461
1458 1462 if warn:
1459 1463 self.ui.warn(_("abort: push creates new remote branches!\n"))
1460 1464 self.ui.status(_("(did you forget to merge?"
1461 1465 " use push -f to force)\n"))
1462 1466 return None, 1
1463 1467 elif inc:
1464 1468 self.ui.warn(_("note: unsynced remote changes!\n"))
1465 1469
1466 1470
1467 1471 if revs is None:
1468 1472 cg = self.changegroup(update, 'push')
1469 1473 else:
1470 1474 cg = self.changegroupsubset(update, revs, 'push')
1471 1475 return cg, remote_heads
1472 1476
1473 1477 def push_addchangegroup(self, remote, force, revs):
1474 1478 lock = remote.lock()
1475 1479 try:
1476 1480 ret = self.prepush(remote, force, revs)
1477 1481 if ret[0] is not None:
1478 1482 cg, remote_heads = ret
1479 1483 return remote.addchangegroup(cg, 'push', self.url())
1480 1484 return ret[1]
1481 1485 finally:
1482 1486 del lock
1483 1487
1484 1488 def push_unbundle(self, remote, force, revs):
1485 1489 # local repo finds heads on server, finds out what revs it
1486 1490 # must push. once revs transferred, if server finds it has
1487 1491 # different heads (someone else won commit/push race), server
1488 1492 # aborts.
1489 1493
1490 1494 ret = self.prepush(remote, force, revs)
1491 1495 if ret[0] is not None:
1492 1496 cg, remote_heads = ret
1493 1497 if force: remote_heads = ['force']
1494 1498 return remote.unbundle(cg, remote_heads, 'push')
1495 1499 return ret[1]
1496 1500
1497 1501 def changegroupinfo(self, nodes, source):
1498 1502 if self.ui.verbose or source == 'bundle':
1499 1503 self.ui.status(_("%d changesets found\n") % len(nodes))
1500 1504 if self.ui.debugflag:
1501 1505 self.ui.debug(_("List of changesets:\n"))
1502 1506 for node in nodes:
1503 1507 self.ui.debug("%s\n" % hex(node))
1504 1508
1505 1509 def changegroupsubset(self, bases, heads, source):
1506 1510 """This function generates a changegroup consisting of all the nodes
1507 1511 that are descendants of any of the bases, and ancestors of any of
1508 1512 the heads.
1509 1513
1510 1514 It is fairly complex as determining which filenodes and which
1511 1515 manifest nodes need to be included for the changeset to be complete
1512 1516 is non-trivial.
1513 1517
1514 1518 Another wrinkle is doing the reverse, figuring out which changeset in
1515 1519 the changegroup a particular filenode or manifestnode belongs to."""
1516 1520
1517 1521 self.hook('preoutgoing', throw=True, source=source)
1518 1522
1519 1523 # Set up some initial variables
1520 1524 # Make it easy to refer to self.changelog
1521 1525 cl = self.changelog
1522 1526 # msng is short for missing - compute the list of changesets in this
1523 1527 # changegroup.
1524 1528 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1525 1529 self.changegroupinfo(msng_cl_lst, source)
1526 1530 # Some bases may turn out to be superfluous, and some heads may be
1527 1531 # too. nodesbetween will return the minimal set of bases and heads
1528 1532 # necessary to re-create the changegroup.
1529 1533
1530 1534 # Known heads are the list of heads that it is assumed the recipient
1531 1535 # of this changegroup will know about.
1532 1536 knownheads = {}
1533 1537 # We assume that all parents of bases are known heads.
1534 1538 for n in bases:
1535 1539 for p in cl.parents(n):
1536 1540 if p != nullid:
1537 1541 knownheads[p] = 1
1538 1542 knownheads = knownheads.keys()
1539 1543 if knownheads:
1540 1544 # Now that we know what heads are known, we can compute which
1541 1545 # changesets are known. The recipient must know about all
1542 1546 # changesets required to reach the known heads from the null
1543 1547 # changeset.
1544 1548 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1545 1549 junk = None
1546 1550 # Transform the list into an ersatz set.
1547 1551 has_cl_set = dict.fromkeys(has_cl_set)
1548 1552 else:
1549 1553 # If there were no known heads, the recipient cannot be assumed to
1550 1554 # know about any changesets.
1551 1555 has_cl_set = {}
1552 1556
1553 1557 # Make it easy to refer to self.manifest
1554 1558 mnfst = self.manifest
1555 1559 # We don't know which manifests are missing yet
1556 1560 msng_mnfst_set = {}
1557 1561 # Nor do we know which filenodes are missing.
1558 1562 msng_filenode_set = {}
1559 1563
1560 1564 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1561 1565 junk = None
1562 1566
1563 1567 # A changeset always belongs to itself, so the changenode lookup
1564 1568 # function for a changenode is identity.
1565 1569 def identity(x):
1566 1570 return x
1567 1571
1568 1572 # A function generating function. Sets up an environment for the
1569 1573 # inner function.
1570 1574 def cmp_by_rev_func(revlog):
1571 1575 # Compare two nodes by their revision number in the environment's
1572 1576 # revision history. Since the revision number both represents the
1573 1577 # most efficient order to read the nodes in, and represents a
1574 1578 # topological sorting of the nodes, this function is often useful.
1575 1579 def cmp_by_rev(a, b):
1576 1580 return cmp(revlog.rev(a), revlog.rev(b))
1577 1581 return cmp_by_rev
1578 1582
1579 1583 # If we determine that a particular file or manifest node must be a
1580 1584 # node that the recipient of the changegroup will already have, we can
1581 1585 # also assume the recipient will have all the parents. This function
1582 1586 # prunes them from the set of missing nodes.
1583 1587 def prune_parents(revlog, hasset, msngset):
1584 1588 haslst = hasset.keys()
1585 1589 haslst.sort(cmp_by_rev_func(revlog))
1586 1590 for node in haslst:
1587 1591 parentlst = [p for p in revlog.parents(node) if p != nullid]
1588 1592 while parentlst:
1589 1593 n = parentlst.pop()
1590 1594 if n not in hasset:
1591 1595 hasset[n] = 1
1592 1596 p = [p for p in revlog.parents(n) if p != nullid]
1593 1597 parentlst.extend(p)
1594 1598 for n in hasset:
1595 1599 msngset.pop(n, None)
1596 1600
1597 1601 # This is a function generating function used to set up an environment
1598 1602 # for the inner function to execute in.
1599 1603 def manifest_and_file_collector(changedfileset):
1600 1604 # This is an information gathering function that gathers
1601 1605 # information from each changeset node that goes out as part of
1602 1606 # the changegroup. The information gathered is a list of which
1603 1607 # manifest nodes are potentially required (the recipient may
1604 1608 # already have them) and total list of all files which were
1605 1609 # changed in any changeset in the changegroup.
1606 1610 #
1607 1611 # We also remember the first changenode each manifest was
1608 1612 # referenced by, so we can later determine which changenode 'owns'
1609 1613 # the manifest.
1610 1614 def collect_manifests_and_files(clnode):
1611 1615 c = cl.read(clnode)
1612 1616 for f in c[3]:
1613 1617 # This is to make sure we only have one instance of each
1614 1618 # filename string for each filename.
1615 1619 changedfileset.setdefault(f, f)
1616 1620 msng_mnfst_set.setdefault(c[0], clnode)
1617 1621 return collect_manifests_and_files
1618 1622
1619 1623 # Figure out which manifest nodes (of the ones we think might be part
1620 1624 # of the changegroup) the recipient must know about and remove them
1621 1625 # from the changegroup.
1622 1626 def prune_manifests():
1623 1627 has_mnfst_set = {}
1624 1628 for n in msng_mnfst_set:
1625 1629 # If a 'missing' manifest thinks it belongs to a changenode
1626 1630 # the recipient is assumed to have, obviously the recipient
1627 1631 # must have that manifest.
1628 1632 linknode = cl.node(mnfst.linkrev(n))
1629 1633 if linknode in has_cl_set:
1630 1634 has_mnfst_set[n] = 1
1631 1635 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1632 1636
1633 1637 # Use the information collected in collect_manifests_and_files to say
1634 1638 # which changenode any manifestnode belongs to.
1635 1639 def lookup_manifest_link(mnfstnode):
1636 1640 return msng_mnfst_set[mnfstnode]
1637 1641
1638 1642 # A function generating function that sets up the initial environment
1639 1643 # for the inner function.
1640 1644 def filenode_collector(changedfiles):
1641 1645 next_rev = [0]
1642 1646 # This gathers information from each manifestnode included in the
1643 1647 # changegroup about which filenodes the manifest node references
1644 1648 # so we can include those in the changegroup too.
1645 1649 #
1646 1650 # It also remembers which changenode each filenode belongs to. It
1647 1651 # does this by assuming that a filenode belongs to the changenode
1648 1652 # that the first manifest referencing it belongs to.
1649 1653 def collect_msng_filenodes(mnfstnode):
1650 1654 r = mnfst.rev(mnfstnode)
1651 1655 if r == next_rev[0]:
1652 1656 # If the last rev we looked at was the one just previous,
1653 1657 # we only need to see a diff.
1654 1658 deltamf = mnfst.readdelta(mnfstnode)
1655 1659 # For each line in the delta
1656 1660 for f, fnode in deltamf.items():
1657 1661 f = changedfiles.get(f, None)
1658 1662 # And if the file is in the list of files we care
1659 1663 # about.
1660 1664 if f is not None:
1661 1665 # Get the changenode this manifest belongs to
1662 1666 clnode = msng_mnfst_set[mnfstnode]
1663 1667 # Create the set of filenodes for the file if
1664 1668 # there isn't one already.
1665 1669 ndset = msng_filenode_set.setdefault(f, {})
1666 1670 # And set the filenode's changelog node to the
1667 1671 # manifest's if it hasn't been set already.
1668 1672 ndset.setdefault(fnode, clnode)
1669 1673 else:
1670 1674 # Otherwise we need a full manifest.
1671 1675 m = mnfst.read(mnfstnode)
1672 1676 # For every file we care about.
1673 1677 for f in changedfiles:
1674 1678 fnode = m.get(f, None)
1675 1679 # If it's in the manifest
1676 1680 if fnode is not None:
1677 1681 # See comments above.
1678 1682 clnode = msng_mnfst_set[mnfstnode]
1679 1683 ndset = msng_filenode_set.setdefault(f, {})
1680 1684 ndset.setdefault(fnode, clnode)
1681 1685 # Remember the revision we hope to see next.
1682 1686 next_rev[0] = r + 1
1683 1687 return collect_msng_filenodes
1684 1688
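
The r == next_rev[0] test above is purely an optimization; restated in
isolation (illustrative names, not module code), the dispatch is:

    def manifest_entries(mnfst, mnfstnode, expected_rev):
        if mnfst.rev(mnfstnode) == expected_rev:
            # consecutive revision: the delta alone lists every changed
            # file -> filenode pair, so skip rebuilding the full text
            return mnfst.readdelta(mnfstnode).items()
        # otherwise fall back to the complete manifest
        return mnfst.read(mnfstnode).items()
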
1685 1689 # We have a list of filenodes we think we need for a file; let's remove
1686 1690 # all those we know the recipient must have.
1687 1691 def prune_filenodes(f, filerevlog):
1688 1692 msngset = msng_filenode_set[f]
1689 1693 hasset = {}
1690 1694 # If a 'missing' filenode thinks it belongs to a changenode we
1691 1695 # assume the recipient must have, then the recipient must have
1692 1696 # that filenode.
1693 1697 for n in msngset:
1694 1698 clnode = cl.node(filerevlog.linkrev(n))
1695 1699 if clnode in has_cl_set:
1696 1700 hasset[n] = 1
1697 1701 prune_parents(filerevlog, hasset, msngset)
1698 1702
1699 1703 # A function generating function that sets up a context for the
1700 1704 # inner function.
1701 1705 def lookup_filenode_link_func(fname):
1702 1706 msngset = msng_filenode_set[fname]
1703 1707 # Lookup the changenode the filenode belongs to.
1704 1708 def lookup_filenode_link(fnode):
1705 1709 return msngset[fnode]
1706 1710 return lookup_filenode_link
1707 1711
1708 1712 # Now that we have all these utility functions to help out and
1709 1713 # logically divide up the task, generate the group.
1710 1714 def gengroup():
1711 1715 # The set of changed files starts empty.
1712 1716 changedfiles = {}
1713 1717 # Create a changenode group generator that will call our functions
1714 1718 # back to lookup the owning changenode and collect information.
1715 1719 group = cl.group(msng_cl_lst, identity,
1716 1720 manifest_and_file_collector(changedfiles))
1717 1721 for chnk in group:
1718 1722 yield chnk
1719 1723
1720 1724 # The list of manifests has been collected by the generator
1721 1725 # calling our functions back.
1722 1726 prune_manifests()
1723 1727 msng_mnfst_lst = msng_mnfst_set.keys()
1724 1728 # Sort the manifestnodes by revision number.
1725 1729 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1726 1730 # Create a generator for the manifestnodes that calls our lookup
1727 1731 # and data collection functions back.
1728 1732 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1729 1733 filenode_collector(changedfiles))
1730 1734 for chnk in group:
1731 1735 yield chnk
1732 1736
1733 1737 # These are no longer needed, dereference and toss the memory for
1734 1738 # them.
1735 1739 msng_mnfst_lst = None
1736 1740 msng_mnfst_set.clear()
1737 1741
1738 1742 changedfiles = changedfiles.keys()
1739 1743 changedfiles.sort()
1740 1744 # Go through all our files in order sorted by name.
1741 1745 for fname in changedfiles:
1742 1746 filerevlog = self.file(fname)
1743 1747 if filerevlog.count() == 0:
1744 1748 raise util.Abort(_("empty or missing revlog for %s") % fname)
1745 1749 # Toss out the filenodes that the recipient isn't really
1746 1750 # missing.
1747 1751 if fname in msng_filenode_set:
1748 1752 prune_filenodes(fname, filerevlog)
1749 1753 msng_filenode_lst = msng_filenode_set[fname].keys()
1750 1754 else:
1751 1755 msng_filenode_lst = []
1752 1756 # If any filenodes are left, generate the group for them,
1753 1757 # otherwise don't bother.
1754 1758 if msng_filenode_lst:
1755 1759 yield changegroup.chunkheader(len(fname))
1756 1760 yield fname
1757 1761 # Sort the filenodes by their revision #
1758 1762 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1759 1763 # Create a group generator and only pass in a changenode
1760 1764 # lookup function as we need to collect no information
1761 1765 # from filenodes.
1762 1766 group = filerevlog.group(msng_filenode_lst,
1763 1767 lookup_filenode_link_func(fname))
1764 1768 for chnk in group:
1765 1769 yield chnk
1766 1770 if fname in msng_filenode_set:
1767 1771 # Don't need this anymore, toss it to free memory.
1768 1772 del msng_filenode_set[fname]
1769 1773 # Signal that no more groups are left.
1770 1774 yield changegroup.closechunk()
1771 1775
1772 1776 if msng_cl_lst:
1773 1777 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1774 1778
1775 1779 return util.chunkbuffer(gengroup())
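
A hedged usage sketch for the stream returned above, assuming (as the next
function's docstring suggests) that this is the tail of
changegroupsubset(bases, heads, source); the chunkbuffer exposes a
file-like read(), so a hypothetical caller can copy it out in fixed-size
pieces:

    def write_changegroup(repo, bases, heads, out):
        cg = repo.changegroupsubset(bases, heads, 'bundle')
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            out.write(chunk)
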
1776 1780
1777 1781 def changegroup(self, basenodes, source):
1778 1782 """Generate a changegroup of all nodes that we have that a recipient
1779 1783 doesn't.
1780 1784
1781 1785 This is much easier than the previous function as we can assume that
1782 1786 the recipient has any changenode we aren't sending them."""
1783 1787
1784 1788 self.hook('preoutgoing', throw=True, source=source)
1785 1789
1786 1790 cl = self.changelog
1787 1791 nodes = cl.nodesbetween(basenodes, None)[0]
1788 1792 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1789 1793 self.changegroupinfo(nodes, source)
1790 1794
1791 1795 def identity(x):
1792 1796 return x
1793 1797
1794 1798 def gennodelst(revlog):
1795 1799 for r in xrange(0, revlog.count()):
1796 1800 n = revlog.node(r)
1797 1801 if revlog.linkrev(n) in revset:
1798 1802 yield n
1799 1803
1800 1804 def changed_file_collector(changedfileset):
1801 1805 def collect_changed_files(clnode):
1802 1806 c = cl.read(clnode)
1803 1807 for fname in c[3]:
1804 1808 changedfileset[fname] = 1
1805 1809 return collect_changed_files
1806 1810
1807 1811 def lookuprevlink_func(revlog):
1808 1812 def lookuprevlink(n):
1809 1813 return cl.node(revlog.linkrev(n))
1810 1814 return lookuprevlink
1811 1815
1812 1816 def gengroup():
1813 1817 # construct a list of all changed files
1814 1818 changedfiles = {}
1815 1819
1816 1820 for chnk in cl.group(nodes, identity,
1817 1821 changed_file_collector(changedfiles)):
1818 1822 yield chnk
1819 1823 changedfiles = changedfiles.keys()
1820 1824 changedfiles.sort()
1821 1825
1822 1826 mnfst = self.manifest
1823 1827 nodeiter = gennodelst(mnfst)
1824 1828 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1825 1829 yield chnk
1826 1830
1827 1831 for fname in changedfiles:
1828 1832 filerevlog = self.file(fname)
1829 1833 if filerevlog.count() == 0:
1830 1834 raise util.Abort(_("empty or missing revlog for %s") % fname)
1831 1835 nodeiter = gennodelst(filerevlog)
1832 1836 nodeiter = list(nodeiter)
1833 1837 if nodeiter:
1834 1838 yield changegroup.chunkheader(len(fname))
1835 1839 yield fname
1836 1840 lookup = lookuprevlink_func(filerevlog)
1837 1841 for chnk in filerevlog.group(nodeiter, lookup):
1838 1842 yield chnk
1839 1843
1840 1844 yield changegroup.closechunk()
1841 1845
1842 1846 if nodes:
1843 1847 self.hook('outgoing', node=hex(nodes[0]), source=source)
1844 1848
1845 1849 return util.chunkbuffer(gengroup())
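
The framing emitted by changegroup.chunkheader/closechunk above is a
4-byte big-endian length that counts the 4 header bytes themselves, with a
zero length closing a group; a sketch under that reading, not the module's
own code:

    import struct

    def frame_chunk(data):
        # total length = 4 header bytes + payload
        return struct.pack(">l", len(data) + 4) + data

    def close_chunk():
        # zero length terminates the current group
        return struct.pack(">l", 0)
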
1846 1850
1847 1851 def addchangegroup(self, source, srctype, url):
1848 1852 """add changegroup to repo.
1849 1853
1850 1854 return values:
1851 1855 - nothing changed or no source: 0
1852 1856 - more heads than before: 1+added heads (2..n)
1853 1857 - fewer heads than before: -1-removed heads (-2..-n)
1854 1858 - number of heads stays the same: 1
1855 1859 """
1856 1860 def csmap(x):
1857 1861 self.ui.debug(_("add changeset %s\n") % short(x))
1858 1862 return cl.count()
1859 1863
1860 1864 def revmap(x):
1861 1865 return cl.rev(x)
1862 1866
1863 1867 if not source:
1864 1868 return 0
1865 1869
1866 1870 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1867 1871
1868 1872 changesets = files = revisions = 0
1869 1873
1870 1874 # write changelog data to temp files so concurrent readers will not see
1871 1875 # inconsistent view
1872 1876 cl = self.changelog
1873 1877 cl.delayupdate()
1874 1878 oldheads = len(cl.heads())
1875 1879
1876 1880 tr = self.transaction()
1877 1881 try:
1878 1882 trp = weakref.proxy(tr)
1879 1883 # pull off the changeset group
1880 1884 self.ui.status(_("adding changesets\n"))
1881 1885 cor = cl.count() - 1
1882 1886 chunkiter = changegroup.chunkiter(source)
1883 1887 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1884 1888 raise util.Abort(_("received changelog group is empty"))
1885 1889 cnr = cl.count() - 1
1886 1890 changesets = cnr - cor
1887 1891
1888 1892 # pull off the manifest group
1889 1893 self.ui.status(_("adding manifests\n"))
1890 1894 chunkiter = changegroup.chunkiter(source)
1891 1895 # no need to check for empty manifest group here:
1892 1896 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1893 1897 # no new manifest will be created and the manifest group will
1894 1898 # be empty during the pull
1895 1899 self.manifest.addgroup(chunkiter, revmap, trp)
1896 1900
1897 1901 # process the files
1898 1902 self.ui.status(_("adding file changes\n"))
1899 1903 while 1:
1900 1904 f = changegroup.getchunk(source)
1901 1905 if not f:
1902 1906 break
1903 1907 self.ui.debug(_("adding %s revisions\n") % f)
1904 1908 fl = self.file(f)
1905 1909 o = fl.count()
1906 1910 chunkiter = changegroup.chunkiter(source)
1907 1911 if fl.addgroup(chunkiter, revmap, trp) is None:
1908 1912 raise util.Abort(_("received file revlog group is empty"))
1909 1913 revisions += fl.count() - o
1910 1914 files += 1
1911 1915
1912 1916 # make changelog see real files again
1913 1917 cl.finalize(trp)
1914 1918
1915 1919 newheads = len(self.changelog.heads())
1916 1920 heads = ""
1917 1921 if oldheads and newheads != oldheads:
1918 1922 heads = _(" (%+d heads)") % (newheads - oldheads)
1919 1923
1920 1924 self.ui.status(_("added %d changesets"
1921 1925 " with %d changes to %d files%s\n")
1922 1926 % (changesets, revisions, files, heads))
1923 1927
1924 1928 if changesets > 0:
1925 1929 self.hook('pretxnchangegroup', throw=True,
1926 1930 node=hex(self.changelog.node(cor+1)), source=srctype,
1927 1931 url=url)
1928 1932
1929 1933 tr.close()
1930 1934 finally:
1931 1935 del tr
1932 1936
1933 1937 if changesets > 0:
1934 1938 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1935 1939 source=srctype, url=url)
1936 1940
1937 1941 for i in xrange(cor + 1, cnr + 1):
1938 1942 self.hook("incoming", node=hex(self.changelog.node(i)),
1939 1943 source=srctype, url=url)
1940 1944
1941 1945 # never return 0 here:
1942 1946 if newheads < oldheads:
1943 1947 return newheads - oldheads - 1
1944 1948 else:
1945 1949 return newheads - oldheads + 1
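
How a caller might decode the return value documented above (hypothetical
helper, not part of this module):

    def describe_heads(ret):
        if ret == 0:
            return "nothing changed"
        if ret > 1:
            return "%d new heads" % (ret - 1)
        if ret == 1:
            return "head count unchanged"
        return "%d heads removed" % (-ret - 1)
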
1946 1950
1947 1951
1948 1952 def stream_in(self, remote):
1949 1953 fp = remote.stream_out()
1950 1954 l = fp.readline()
1951 1955 try:
1952 1956 resp = int(l)
1953 1957 except ValueError:
1954 1958 raise util.UnexpectedOutput(
1955 1959 _('Unexpected response from remote server:'), l)
1956 1960 if resp == 1:
1957 1961 raise util.Abort(_('operation forbidden by server'))
1958 1962 elif resp == 2:
1959 1963 raise util.Abort(_('locking the remote repository failed'))
1960 1964 elif resp != 0:
1961 1965 raise util.Abort(_('the server sent an unknown error code'))
1962 1966 self.ui.status(_('streaming all changes\n'))
1963 1967 l = fp.readline()
1964 1968 try:
1965 1969 total_files, total_bytes = map(int, l.split(' ', 1))
1966 1970 except (ValueError, TypeError):
1967 1971 raise util.UnexpectedOutput(
1968 1972 _('Unexpected response from remote server:'), l)
1969 1973 self.ui.status(_('%d files to transfer, %s of data\n') %
1970 1974 (total_files, util.bytecount(total_bytes)))
1971 1975 start = time.time()
1972 1976 for i in xrange(total_files):
1973 1977 # XXX doesn't support '\n' or '\r' in filenames
1974 1978 l = fp.readline()
1975 1979 try:
1976 1980 name, size = l.split('\0', 1)
1977 1981 size = int(size)
1978 1982 except (ValueError, TypeError):
1979 1983 raise util.UnexpectedOutput(
1980 1984 _('Unexpected response from remote server:'), l)
1981 1985 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1982 1986 ofp = self.sopener(name, 'w')
1983 1987 for chunk in util.filechunkiter(fp, limit=size):
1984 1988 ofp.write(chunk)
1985 1989 ofp.close()
1986 1990 elapsed = time.time() - start
1987 1991 if elapsed <= 0:
1988 1992 elapsed = 0.001
1989 1993 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1990 1994 (util.bytecount(total_bytes), elapsed,
1991 1995 util.bytecount(total_bytes / elapsed)))
1992 1996 self.invalidate()
1993 1997 return len(self.heads()) + 1
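
The wire format consumed by stream_in, restated as a self-contained parser
(an illustrative sketch): a status line, a "<files> <bytes>" line, then
for each file a "name\0size" line followed by exactly size raw bytes:

    def parse_stream(fp):
        if int(fp.readline()) != 0:
            raise ValueError("server refused or failed to lock")
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))
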
1994 1998
1995 1999 def clone(self, remote, heads=[], stream=False):
1996 2000 '''clone remote repository.
1997 2001
1998 2002 keyword arguments:
1999 2003 heads: list of revs to clone (forces use of pull)
2000 2004 stream: use streaming clone if possible'''
2001 2005
2002 2006 # now, all clients that can request uncompressed clones can
2003 2007 # read repo formats supported by all servers that can serve
2004 2008 # them.
2005 2009
2006 2010 # if revlog format changes, client will have to check version
2007 2011 # and format flags on "stream" capability, and use
2008 2012 # uncompressed only if compatible.
2009 2013
2010 2014 if stream and not heads and remote.capable('stream'):
2011 2015 return self.stream_in(remote)
2012 2016 return self.pull(remote, heads)
2013 2017
2014 2018 # used to avoid circular references so destructors work
2015 2019 def aftertrans(files):
2016 2020 renamefiles = [tuple(t) for t in files]
2017 2021 def a():
2018 2022 for src, dest in renamefiles:
2019 2023 util.rename(src, dest)
2020 2024 return a
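
A hedged usage sketch for aftertrans: the returned closure is suitable as
a transaction's close callback, renaming journal files to undo files so a
later rollback can restore them (paths illustrative; per the diff below,
issue 902 adds journal.branch to this set):

    after = aftertrans([("journal", "undo"),
                        ("journal.dirstate", "undo.dirstate"),
                        ("journal.branch", "undo.branch")])
    after()  # performs the queued renames via util.rename
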
2021 2025
2022 2026 def instance(ui, path, create):
2023 2027 return localrepository(ui, util.drop_scheme('file', path), create)
2024 2028
2025 2029 def islocal(path):
2026 2030 return True
@@ -1,7 +1,7 b''
1 1 0
2 2 0
3 3 adding changesets
4 4 transaction abort!
5 5 rollback completed
6 6 killed!
7 .hg/00changelog.i .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
7 .hg/00changelog.i .hg/journal.branch .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
@@ -1,15 +1,21 b''
1 1 #!/bin/sh
2 2
3 3 mkdir t
4 4 cd t
5 5 hg init
6 6 echo a > a
7 7 hg add a
8 8 hg commit -m "test" -d "1000000 0"
9 9 hg verify
10 10 hg parents
11 11 hg status
12 12 hg rollback
13 13 hg verify
14 14 hg parents
15 15 hg status
16
17 # Test issue 902
18 hg commit -m "test"
19 hg branch test
20 hg rollback
21 hg branch
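
The new test lines exercise issue 902: hg rollback must restore the
working directory's branch along with the dirstate. A sketch of the
bookkeeping this implies (hypothetical helpers; the actual change lives in
localrepository's transaction/rollback methods, which this hunk does not
show):

    def save_branch(opener, dirstate):
        # written alongside journal.dirstate when a transaction starts
        opener("journal.branch", "w").write(dirstate.branch())

    def restore_branch(opener, dirstate):
        # read back by rollback, mirroring the undo.dirstate restore
        dirstate.setbranch(opener("undo.branch").read())
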
@@ -1,18 +1,21 b''
1 1 checking changesets
2 2 checking manifests
3 3 crosschecking files in changesets and manifests
4 4 checking files
5 5 1 files, 1 changesets, 1 total revisions
6 6 changeset: 0:0acdaf898367
7 7 tag: tip
8 8 user: test
9 9 date: Mon Jan 12 13:46:40 1970 +0000
10 10 summary: test
11 11
12 12 rolling back last transaction
13 13 checking changesets
14 14 checking manifests
15 15 crosschecking files in changesets and manifests
16 16 checking files
17 17 0 files, 0 changesets, 0 total revisions
18 18 A a
19 marked working directory as branch test
20 rolling back last transaction
21 default