localrepo: add destroyed() method for strip/rollback to use (issue548).
Greg Ward
r9150:09a1ee49 default
@@ -1,2124 +1,2135 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92
93 93 # These two define the set of tags for this repository. _tags
94 94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 95 # 'local'. (Global tags are defined by .hgtags across all
96 96 # heads, and local tags are defined in .hg/localtags.) They
97 97 # constitute the in-memory cache of tags.
98 98 self._tags = None
99 99 self._tagtypes = None
100 100
101 101 self.branchcache = None
102 102 self._ubranchcache = None # UTF-8 version of branchcache
103 103 self._branchcachetip = None
104 104 self.nodetagscache = None
105 105 self.filterpats = {}
106 106 self._datafilters = {}
107 107 self._transref = self._lockref = self._wlockref = None
108 108
109 109 @propertycache
110 110 def changelog(self):
111 111 c = changelog.changelog(self.sopener)
112 112 if 'HG_PENDING' in os.environ:
113 113 p = os.environ['HG_PENDING']
114 114 if p.startswith(self.root):
115 115 c.readpending('00changelog.i.a')
116 116 self.sopener.defversion = c.version
117 117 return c
118 118
119 119 @propertycache
120 120 def manifest(self):
121 121 return manifest.manifest(self.sopener)
122 122
123 123 @propertycache
124 124 def dirstate(self):
125 125 return dirstate.dirstate(self.opener, self.ui, self.root)
126 126
127 127 def __getitem__(self, changeid):
128 128 if changeid is None:
129 129 return context.workingctx(self)
130 130 return context.changectx(self, changeid)
131 131
132 132 def __nonzero__(self):
133 133 return True
134 134
135 135 def __len__(self):
136 136 return len(self.changelog)
137 137
138 138 def __iter__(self):
139 139 for i in xrange(len(self)):
140 140 yield i
141 141
142 142 def url(self):
143 143 return 'file:' + self.root
144 144
145 145 def hook(self, name, throw=False, **args):
146 146 return hook.hook(self.ui, self, name, throw, **args)
147 147
148 148 tag_disallowed = ':\r\n'
149 149
150 150 def _tag(self, names, node, message, local, user, date, extra={}):
151 151 if isinstance(names, str):
152 152 allchars = names
153 153 names = (names,)
154 154 else:
155 155 allchars = ''.join(names)
156 156 for c in self.tag_disallowed:
157 157 if c in allchars:
158 158 raise util.Abort(_('%r cannot be used in a tag name') % c)
159 159
160 160 for name in names:
161 161 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 162 local=local)
163 163
164 164 def writetags(fp, names, munge, prevtags):
165 165 fp.seek(0, 2)
166 166 if prevtags and prevtags[-1] != '\n':
167 167 fp.write('\n')
168 168 for name in names:
169 169 m = munge and munge(name) or name
170 170 if self._tagtypes and name in self._tagtypes:
171 171 old = self._tags.get(name, nullid)
172 172 fp.write('%s %s\n' % (hex(old), m))
173 173 fp.write('%s %s\n' % (hex(node), m))
174 174 fp.close()
175 175
176 176 prevtags = ''
177 177 if local:
178 178 try:
179 179 fp = self.opener('localtags', 'r+')
180 180 except IOError:
181 181 fp = self.opener('localtags', 'a')
182 182 else:
183 183 prevtags = fp.read()
184 184
185 185 # local tags are stored in the current charset
186 186 writetags(fp, names, None, prevtags)
187 187 for name in names:
188 188 self.hook('tag', node=hex(node), tag=name, local=local)
189 189 return
190 190
191 191 try:
192 192 fp = self.wfile('.hgtags', 'rb+')
193 193 except IOError:
194 194 fp = self.wfile('.hgtags', 'ab')
195 195 else:
196 196 prevtags = fp.read()
197 197
198 198 # committed tags are stored in UTF-8
199 199 writetags(fp, names, encoding.fromlocal, prevtags)
200 200
201 201 if '.hgtags' not in self.dirstate:
202 202 self.add(['.hgtags'])
203 203
204 204 m = match_.exact(self.root, '', ['.hgtags'])
205 205 tagnode = self.commit(message, user, date, extra=extra, match=m)
206 206
207 207 for name in names:
208 208 self.hook('tag', node=hex(node), tag=name, local=local)
209 209
210 210 return tagnode
211 211
212 212 def tag(self, names, node, message, local, user, date):
213 213 '''tag a revision with one or more symbolic names.
214 214
215 215 names is a list of strings or, when adding a single tag, names may be a
216 216 string.
217 217
218 218 if local is True, the tags are stored in a per-repository file.
219 219 otherwise, they are stored in the .hgtags file, and a new
220 220 changeset is committed with the change.
221 221
222 222 keyword arguments:
223 223
224 224 local: whether to store tags in non-version-controlled file
225 225 (default False)
226 226
227 227 message: commit message to use if committing
228 228
229 229 user: name of user to use if committing
230 230
231 231 date: date tuple to use if committing'''
232 232
233 233 for x in self.status()[:5]:
234 234 if '.hgtags' in x:
235 235 raise util.Abort(_('working copy of .hgtags is changed '
236 236 '(please commit .hgtags manually)'))
237 237
238 238 self.tags() # instantiate the cache
239 239 self._tag(names, node, message, local, user, date)
240 240
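
tag() accepts either a single name or a list of names. A minimal usage sketch, assuming 'repo' is a localrepository instance (the tag name, message, user and date values are hypothetical placeholders):

    # Hypothetical usage of tag(); a committed (non-local) tag creates
    # a new changeset touching .hgtags, as implemented in _tag() above.
    node = repo.lookup('tip')
    repo.tag(['v1.0'], node, 'Added tag v1.0', False,
             'user@example.com', None)
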
241 241 def tags(self):
242 242 '''return a mapping of tag to node'''
243 243 if self._tags is None:
244 244 (self._tags, self._tagtypes) = self._findtags()
245 245
246 246 return self._tags
247 247
248 248 def _findtags(self):
249 249 '''Do the hard work of finding tags. Return a pair of dicts
250 250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 251 maps tag name to a string like \'global\' or \'local\'.
252 252 Subclasses or extensions are free to add their own tags, but
253 253 should be aware that the returned dicts will be retained for the
254 254 duration of the localrepo object.'''
255 255
256 256 # XXX what tagtype should subclasses/extensions use? Currently
257 257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 258 # Should each extension invent its own tag type? Should there
259 259 # be one tagtype for all such "virtual" tags? Or is the status
260 260 # quo fine?
261 261
262 262 alltags = {} # map tag name to (node, hist)
263 263 tagtypes = {}
264 264
265 265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267 267
268 268 tags = {}
269 269 for (name, (node, hist)) in alltags.iteritems():
270 270 if node != nullid:
271 271 tags[name] = node
272 272 tags['tip'] = self.changelog.tip()
273 273 return (tags, tagtypes)
274 274
275 275 def tagtype(self, tagname):
276 276 '''
277 277 return the type of the given tag. result can be:
278 278
279 279 'local' : a local tag
280 280 'global' : a global tag
281 281 None : tag does not exist
282 282 '''
283 283
284 284 self.tags()
285 285
286 286 return self._tagtypes.get(tagname)
287 287
288 288 def tagslist(self):
289 289 '''return a list of tags ordered by revision'''
290 290 l = []
291 291 for t, n in self.tags().iteritems():
292 292 try:
293 293 r = self.changelog.rev(n)
294 294 except:
295 295 r = -2 # sort to the beginning of the list if unknown
296 296 l.append((r, t, n))
297 297 return [(t, n) for r, t, n in sorted(l)]
298 298
299 299 def nodetags(self, node):
300 300 '''return the tags associated with a node'''
301 301 if not self.nodetagscache:
302 302 self.nodetagscache = {}
303 303 for t, n in self.tags().iteritems():
304 304 self.nodetagscache.setdefault(n, []).append(t)
305 305 return self.nodetagscache.get(node, [])
306 306
307 307 def _branchtags(self, partial, lrev):
308 308 # TODO: rename this function?
309 309 tiprev = len(self) - 1
310 310 if lrev != tiprev:
311 311 self._updatebranchcache(partial, lrev+1, tiprev+1)
312 312 self._writebranchcache(partial, self.changelog.tip(), tiprev)
313 313
314 314 return partial
315 315
316 316 def branchmap(self):
317 317 tip = self.changelog.tip()
318 318 if self.branchcache is not None and self._branchcachetip == tip:
319 319 return self.branchcache
320 320
321 321 oldtip = self._branchcachetip
322 322 self._branchcachetip = tip
323 323 if self.branchcache is None:
324 324 self.branchcache = {} # avoid recursion in changectx
325 325 else:
326 326 self.branchcache.clear() # keep using the same dict
327 327 if oldtip is None or oldtip not in self.changelog.nodemap:
328 328 partial, last, lrev = self._readbranchcache()
329 329 else:
330 330 lrev = self.changelog.rev(oldtip)
331 331 partial = self._ubranchcache
332 332
333 333 self._branchtags(partial, lrev)
334 334 # this private cache holds all heads (not just tips)
335 335 self._ubranchcache = partial
336 336
337 337 # the branch cache is stored on disk as UTF-8, but in the local
338 338 # charset internally
339 339 for k, v in partial.iteritems():
340 340 self.branchcache[encoding.tolocal(k)] = v
341 341 return self.branchcache
342 342
343 343
344 344 def branchtags(self):
345 345 '''return a dict where branch names map to the tipmost head of
346 346 the branch; open heads come before closed'''
347 347 bt = {}
348 348 for bn, heads in self.branchmap().iteritems():
349 349 head = None
350 350 for i in range(len(heads)-1, -1, -1):
351 351 h = heads[i]
352 352 if 'close' not in self.changelog.read(h)[5]:
353 353 head = h
354 354 break
355 355 # no open heads were found
356 356 if head is None:
357 357 head = heads[-1]
358 358 bt[bn] = head
359 359 return bt
360 360
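
A sketch of the two branch views above, assuming 'repo' is a localrepository instance (node values illustrative):

    # branchmap() keeps every head of each branch, ordered lowest to
    # highest; branchtags() reduces that to the tipmost open head.
    allheads = repo.branchmap()   # {'default': [node1, node2, ...]}
    tips = repo.branchtags()      # {'default': node2}
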
361 361
362 362 def _readbranchcache(self):
363 363 partial = {}
364 364 try:
365 365 f = self.opener("branchheads.cache")
366 366 lines = f.read().split('\n')
367 367 f.close()
368 368 except (IOError, OSError):
369 369 return {}, nullid, nullrev
370 370
371 371 try:
372 372 last, lrev = lines.pop(0).split(" ", 1)
373 373 last, lrev = bin(last), int(lrev)
374 374 if lrev >= len(self) or self[lrev].node() != last:
375 375 # invalidate the cache
376 376 raise ValueError('invalidating branch cache (tip differs)')
377 377 for l in lines:
378 378 if not l: continue
379 379 node, label = l.split(" ", 1)
380 380 partial.setdefault(label.strip(), []).append(bin(node))
381 381 except KeyboardInterrupt:
382 382 raise
383 383 except Exception, inst:
384 384 if self.ui.debugflag:
385 385 self.ui.warn(str(inst), '\n')
386 386 partial, last, lrev = {}, nullid, nullrev
387 387 return partial, last, lrev
388 388
389 389 def _writebranchcache(self, branches, tip, tiprev):
390 390 try:
391 391 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 392 f.write("%s %s\n" % (hex(tip), tiprev))
393 393 for label, nodes in branches.iteritems():
394 394 for node in nodes:
395 395 f.write("%s %s\n" % (hex(node), label))
396 396 f.rename()
397 397 except (IOError, OSError):
398 398 pass
399 399
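
The branchheads.cache layout produced and consumed by the two methods above is one header line naming the cached tip, then one line per head. A parsing sketch mirroring _readbranchcache():

    # Sketch of the cache format: first line '<tip hex> <tip rev>',
    # every following line '<head hex> <branch name>'.
    lines = repo.opener('branchheads.cache').read().split('\n')
    last, lrev = lines[0].split(' ', 1)
    heads = {}
    for l in lines[1:]:
        if not l:
            continue
        node, label = l.split(' ', 1)
        heads.setdefault(label.strip(), []).append(node)
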
400 400 def _updatebranchcache(self, partial, start, end):
401 401 # collect new branch entries
402 402 newbranches = {}
403 403 for r in xrange(start, end):
404 404 c = self[r]
405 405 newbranches.setdefault(c.branch(), []).append(c.node())
406 406 # if older branchheads are reachable from new ones, they aren't
407 407 # really branchheads. Note checking parents is insufficient:
408 408 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 409 for branch, newnodes in newbranches.iteritems():
410 410 bheads = partial.setdefault(branch, [])
411 411 bheads.extend(newnodes)
412 412 if len(bheads) < 2:
413 413 continue
414 414 newbheads = []
415 415 # starting from tip means fewer passes over reachable
416 416 while newnodes:
417 417 latest = newnodes.pop()
418 418 if latest not in bheads:
419 419 continue
420 420 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 421 reachable = self.changelog.reachable(latest, minbhrev)
422 422 bheads = [b for b in bheads if b not in reachable]
423 423 newbheads.insert(0, latest)
424 424 bheads.extend(newbheads)
425 425 partial[branch] = bheads
426 426
427 427 def lookup(self, key):
428 428 if isinstance(key, int):
429 429 return self.changelog.node(key)
430 430 elif key == '.':
431 431 return self.dirstate.parents()[0]
432 432 elif key == 'null':
433 433 return nullid
434 434 elif key == 'tip':
435 435 return self.changelog.tip()
436 436 n = self.changelog._match(key)
437 437 if n:
438 438 return n
439 439 if key in self.tags():
440 440 return self.tags()[key]
441 441 if key in self.branchtags():
442 442 return self.branchtags()[key]
443 443 n = self.changelog._partialmatch(key)
444 444 if n:
445 445 return n
446 446
447 447 # can't find key, check if it might have come from damaged dirstate
448 448 if key in self.dirstate.parents():
449 449 raise error.Abort(_("working directory has unknown parent '%s'!")
450 450 % short(key))
451 451 try:
452 452 if len(key) == 20:
453 453 key = hex(key)
454 454 except:
455 455 pass
456 456 raise error.RepoError(_("unknown revision '%s'") % key)
457 457
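
lookup() resolves keys in a fixed order: integer revision, the special names '.', 'null' and 'tip', an exact changelog match, tags, branch names, and finally a partial hash match. A usage sketch (the tag, branch and hash prefix are hypothetical):

    repo.lookup(0)          # integer revision number
    repo.lookup('.')        # first parent of the working directory
    repo.lookup('tip')      # tipmost changeset
    repo.lookup('v1.0')     # a tag, via tags()
    repo.lookup('default')  # a branch, via branchtags()
    repo.lookup('0a1b2c')   # unambiguous hash prefix
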
458 458 def local(self):
459 459 return True
460 460
461 461 def join(self, f):
462 462 return os.path.join(self.path, f)
463 463
464 464 def wjoin(self, f):
465 465 return os.path.join(self.root, f)
466 466
467 467 def rjoin(self, f):
468 468 return os.path.join(self.root, util.pconvert(f))
469 469
470 470 def file(self, f):
471 471 if f[0] == '/':
472 472 f = f[1:]
473 473 return filelog.filelog(self.sopener, f)
474 474
475 475 def changectx(self, changeid):
476 476 return self[changeid]
477 477
478 478 def parents(self, changeid=None):
479 479 '''get list of changectxs for parents of changeid'''
480 480 return self[changeid].parents()
481 481
482 482 def filectx(self, path, changeid=None, fileid=None):
483 483 """changeid can be a changeset revision, node, or tag.
484 484 fileid can be a file revision or node."""
485 485 return context.filectx(self, path, changeid, fileid)
486 486
487 487 def getcwd(self):
488 488 return self.dirstate.getcwd()
489 489
490 490 def pathto(self, f, cwd=None):
491 491 return self.dirstate.pathto(f, cwd)
492 492
493 493 def wfile(self, f, mode='r'):
494 494 return self.wopener(f, mode)
495 495
496 496 def _link(self, f):
497 497 return os.path.islink(self.wjoin(f))
498 498
499 499 def _filter(self, filter, filename, data):
500 500 if filter not in self.filterpats:
501 501 l = []
502 502 for pat, cmd in self.ui.configitems(filter):
503 503 if cmd == '!':
504 504 continue
505 505 mf = match_.match(self.root, '', [pat])
506 506 fn = None
507 507 params = cmd
508 508 for name, filterfn in self._datafilters.iteritems():
509 509 if cmd.startswith(name):
510 510 fn = filterfn
511 511 params = cmd[len(name):].lstrip()
512 512 break
513 513 if not fn:
514 514 fn = lambda s, c, **kwargs: util.filter(s, c)
515 515 # Wrap old filters not supporting keyword arguments
516 516 if not inspect.getargspec(fn)[2]:
517 517 oldfn = fn
518 518 fn = lambda s, c, **kwargs: oldfn(s, c)
519 519 l.append((mf, fn, params))
520 520 self.filterpats[filter] = l
521 521
522 522 for mf, fn, cmd in self.filterpats[filter]:
523 523 if mf(filename):
524 524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
525 525 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 526 break
527 527
528 528 return data
529 529
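
_filter() is driven by the [encode] and [decode] hgrc sections (applied by wread() and wwrite() below); a pattern mapped to '!' is skipped. A configuration sketch along the lines of the gzip example in the hgrc documentation:

    [encode]
    # uncompress gzip files on checkin to improve delta compression
    *.gz = pipe: gunzip

    [decode]
    # recompress gzip files when writing them to the working directory
    *.gz = gzip
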
530 530 def adddatafilter(self, name, filter):
531 531 self._datafilters[name] = filter
532 532
533 533 def wread(self, filename):
534 534 if self._link(filename):
535 535 data = os.readlink(self.wjoin(filename))
536 536 else:
537 537 data = self.wopener(filename, 'r').read()
538 538 return self._filter("encode", filename, data)
539 539
540 540 def wwrite(self, filename, data, flags):
541 541 data = self._filter("decode", filename, data)
542 542 try:
543 543 os.unlink(self.wjoin(filename))
544 544 except OSError:
545 545 pass
546 546 if 'l' in flags:
547 547 self.wopener.symlink(data, filename)
548 548 else:
549 549 self.wopener(filename, 'w').write(data)
550 550 if 'x' in flags:
551 551 util.set_flags(self.wjoin(filename), False, True)
552 552
553 553 def wwritedata(self, filename, data):
554 554 return self._filter("decode", filename, data)
555 555
556 556 def transaction(self):
557 557 tr = self._transref and self._transref() or None
558 558 if tr and tr.running():
559 559 return tr.nest()
560 560
561 561 # abort here if the journal already exists
562 562 if os.path.exists(self.sjoin("journal")):
563 563 raise error.RepoError(_("journal already exists - run hg recover"))
564 564
565 565 # save dirstate for rollback
566 566 try:
567 567 ds = self.opener("dirstate").read()
568 568 except IOError:
569 569 ds = ""
570 570 self.opener("journal.dirstate", "w").write(ds)
571 571 self.opener("journal.branch", "w").write(self.dirstate.branch())
572 572
573 573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
574 574 (self.join("journal.dirstate"), self.join("undo.dirstate")),
575 575 (self.join("journal.branch"), self.join("undo.branch"))]
576 576 tr = transaction.transaction(self.ui.warn, self.sopener,
577 577 self.sjoin("journal"),
578 578 aftertrans(renames),
579 579 self.store.createmode)
580 580 self._transref = weakref.ref(tr)
581 581 return tr
582 582
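
A sketch of the transaction lifecycle, following the pattern commitctx() uses below: close() commits and leaves the undo files behind for rollback(), while releasing an unclosed transaction aborts it.

    tr = repo.transaction()
    try:
        # ... append revlog entries through tr ...
        tr.close()   # journal.* becomes undo.* for a later rollback
    finally:
        del tr       # an open (unclosed) transaction aborts on release
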
583 583 def recover(self):
584 584 lock = self.lock()
585 585 try:
586 586 if os.path.exists(self.sjoin("journal")):
587 587 self.ui.status(_("rolling back interrupted transaction\n"))
588 588 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
589 589 self.invalidate()
590 590 return True
591 591 else:
592 592 self.ui.warn(_("no interrupted transaction available\n"))
593 593 return False
594 594 finally:
595 595 lock.release()
596 596
597 597 def rollback(self):
598 598 wlock = lock = None
599 599 try:
600 600 wlock = self.wlock()
601 601 lock = self.lock()
602 602 if os.path.exists(self.sjoin("undo")):
603 603 self.ui.status(_("rolling back last transaction\n"))
604 604 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
605 605 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
606 606 try:
607 607 branch = self.opener("undo.branch").read()
608 608 self.dirstate.setbranch(branch)
609 609 except IOError:
610 610 self.ui.warn(_("Named branch could not be reset, "
611 611 "current branch still is: %s\n")
612 612 % encoding.tolocal(self.dirstate.branch()))
613 613 self.invalidate()
614 614 self.dirstate.invalidate()
615 self.destroyed()
615 616 else:
616 617 self.ui.warn(_("no rollback information available\n"))
617 618 finally:
618 619 release(lock, wlock)
619 620
620 621 def invalidate(self):
621 622 for a in "changelog manifest".split():
622 623 if a in self.__dict__:
623 624 delattr(self, a)
624 625 self._tags = None
625 626 self._tagtypes = None
626 627 self.nodetagscache = None
627 628 self.branchcache = None
628 629 self._ubranchcache = None
629 630 self._branchcachetip = None
630 631
631 632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
632 633 try:
633 634 l = lock.lock(lockname, 0, releasefn, desc=desc)
634 635 except error.LockHeld, inst:
635 636 if not wait:
636 637 raise
637 638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
638 639 (desc, inst.locker))
639 640 # default to 600 seconds timeout
640 641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
641 642 releasefn, desc=desc)
642 643 if acquirefn:
643 644 acquirefn()
644 645 return l
645 646
646 647 def lock(self, wait=True):
647 648 l = self._lockref and self._lockref()
648 649 if l is not None and l.held:
649 650 l.lock()
650 651 return l
651 652
652 653 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
653 654 _('repository %s') % self.origroot)
654 655 self._lockref = weakref.ref(l)
655 656 return l
656 657
657 658 def wlock(self, wait=True):
658 659 l = self._wlockref and self._wlockref()
659 660 if l is not None and l.held:
660 661 l.lock()
661 662 return l
662 663
663 664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 665 self.dirstate.invalidate, _('working directory of %s') %
665 666 self.origroot)
666 667 self._wlockref = weakref.ref(l)
667 668 return l
668 669
669 670 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
670 671 """
671 672 commit an individual file as part of a larger transaction
672 673 """
673 674
674 675 fname = fctx.path()
675 676 text = fctx.data()
676 677 flog = self.file(fname)
677 678 fparent1 = manifest1.get(fname, nullid)
678 679 fparent2 = fparent2o = manifest2.get(fname, nullid)
679 680
680 681 meta = {}
681 682 copy = fctx.renamed()
682 683 if copy and copy[0] != fname:
683 684 # Mark the new revision of this file as a copy of another
684 685 # file. This copy data will effectively act as a parent
685 686 # of this new revision. If this is a merge, the first
686 687 # parent will be the nullid (meaning "look up the copy data")
687 688 # and the second one will be the other parent. For example:
688 689 #
689 690 # 0 --- 1 --- 3 rev1 changes file foo
690 691 # \ / rev2 renames foo to bar and changes it
691 692 # \- 2 -/ rev3 should have bar with all changes and
692 693 # should record that bar descends from
693 694 # bar in rev2 and foo in rev1
694 695 #
695 696 # this allows this merge to succeed:
696 697 #
697 698 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
698 699 # \ / merging rev3 and rev4 should use bar@rev2
699 700 # \- 2 --- 4 as the merge base
700 701 #
701 702
702 703 cfname = copy[0]
703 704 crev = manifest1.get(cfname)
704 705 newfparent = fparent2
705 706
706 707 if manifest2: # branch merge
707 708 if fparent2 == nullid or crev is None: # copied on remote side
708 709 if cfname in manifest2:
709 710 crev = manifest2[cfname]
710 711 newfparent = fparent1
711 712
712 713 # find source in nearest ancestor if we've lost track
713 714 if not crev:
714 715 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
715 716 (fname, cfname))
716 717 for ancestor in self['.'].ancestors():
717 718 if cfname in ancestor:
718 719 crev = ancestor[cfname].filenode()
719 720 break
720 721
721 722 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
722 723 meta["copy"] = cfname
723 724 meta["copyrev"] = hex(crev)
724 725 fparent1, fparent2 = nullid, newfparent
725 726 elif fparent2 != nullid:
726 727 # is one parent an ancestor of the other?
727 728 fparentancestor = flog.ancestor(fparent1, fparent2)
728 729 if fparentancestor == fparent1:
729 730 fparent1, fparent2 = fparent2, nullid
730 731 elif fparentancestor == fparent2:
731 732 fparent2 = nullid
732 733
733 734 # is the file changed?
734 735 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
735 736 changelist.append(fname)
736 737 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
737 738
738 739 # are just the flags changed during merge?
739 740 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
740 741 changelist.append(fname)
741 742
742 743 return fparent1
743 744
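
For the foo -> bar rename in the diagram above, the recorded filelog metadata would look roughly like this (the copyrev hash is a hypothetical placeholder); fparent1 then becomes nullid ("look up the copy data") and fparent2 the other parent, as in the code:

    meta = {
        'copy': 'foo',                 # copy/rename source path
        'copyrev': '9a38122997b3...',  # filelog node of the source
    }
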
744 745 def commit(self, text="", user=None, date=None, match=None, force=False,
745 746 editor=False, extra={}):
746 747 """Add a new revision to current repository.
747 748
748 749 Revision information is gathered from the working directory,
749 750 match can be used to filter the committed files. If editor is
750 751 supplied, it is called to get a commit message.
751 752 """
752 753
753 754 def fail(f, msg):
754 755 raise util.Abort('%s: %s' % (f, msg))
755 756
756 757 if not match:
757 758 match = match_.always(self.root, '')
758 759
759 760 if not force:
760 761 vdirs = []
761 762 match.dir = vdirs.append
762 763 match.bad = fail
763 764
764 765 wlock = self.wlock()
765 766 try:
766 767 p1, p2 = self.dirstate.parents()
767 768 wctx = self[None]
768 769
769 770 if (not force and p2 != nullid and match and
770 771 (match.files() or match.anypats())):
771 772 raise util.Abort(_('cannot partially commit a merge '
772 773 '(do not specify files or patterns)'))
773 774
774 775 changes = self.status(match=match, clean=force)
775 776 if force:
776 777 changes[0].extend(changes[6]) # mq may commit unchanged files
777 778
778 779 # check subrepos
779 780 subs = []
780 781 for s in wctx.substate:
781 782 if match(s) and wctx.sub(s).dirty():
782 783 subs.append(s)
783 784 if subs and '.hgsubstate' not in changes[0]:
784 785 changes[0].insert(0, '.hgsubstate')
785 786
786 787 # make sure all explicit patterns are matched
787 788 if not force and match.files():
788 789 matched = set(changes[0] + changes[1] + changes[2])
789 790
790 791 for f in match.files():
791 792 if f == '.' or f in matched or f in wctx.substate:
792 793 continue
793 794 if f in changes[3]: # missing
794 795 fail(f, _('file not found!'))
795 796 if f in vdirs: # visited directory
796 797 d = f + '/'
797 798 for mf in matched:
798 799 if mf.startswith(d):
799 800 break
800 801 else:
801 802 fail(f, _("no match under directory!"))
802 803 elif f not in self.dirstate:
803 804 fail(f, _("file not tracked!"))
804 805
805 806 if (not force and not extra.get("close") and p2 == nullid
806 807 and not (changes[0] or changes[1] or changes[2])
807 808 and self[None].branch() == self['.'].branch()):
808 809 return None
809 810
810 811 ms = merge_.mergestate(self)
811 812 for f in changes[0]:
812 813 if f in ms and ms[f] == 'u':
813 814 raise util.Abort(_("unresolved merge conflicts "
814 815 "(see hg resolve)"))
815 816
816 817 cctx = context.workingctx(self, (p1, p2), text, user, date,
817 818 extra, changes)
818 819 if editor:
819 820 cctx._text = editor(self, cctx, subs)
820 821
821 822 # commit subs
822 823 if subs:
823 824 state = wctx.substate.copy()
824 825 for s in subs:
825 826 self.ui.status(_('committing subrepository %s\n') % s)
826 827 sr = wctx.sub(s).commit(cctx._text, user, date)
827 828 state[s] = (state[s][0], sr)
828 829 subrepo.writestate(self, state)
829 830
830 831 ret = self.commitctx(cctx, True)
831 832
832 833 # update dirstate and mergestate
833 834 for f in changes[0] + changes[1]:
834 835 self.dirstate.normal(f)
835 836 for f in changes[2]:
836 837 self.dirstate.forget(f)
837 838 self.dirstate.setparents(ret)
838 839 ms.reset()
839 840
840 841 return ret
841 842
842 843 finally:
843 844 wlock.release()
844 845
845 846 def commitctx(self, ctx, error=False):
846 847 """Add a new revision to current repository.
847 848
848 849 Revision information is passed via the context argument.
849 850 """
850 851
851 852 tr = lock = None
852 853 removed = ctx.removed()
853 854 p1, p2 = ctx.p1(), ctx.p2()
854 855 m1 = p1.manifest().copy()
855 856 m2 = p2.manifest()
856 857 user = ctx.user()
857 858
858 859 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
859 860 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
860 861
861 862 lock = self.lock()
862 863 try:
863 864 tr = self.transaction()
864 865 trp = weakref.proxy(tr)
865 866
866 867 # check in files
867 868 new = {}
868 869 changed = []
869 870 linkrev = len(self)
870 871 for f in sorted(ctx.modified() + ctx.added()):
871 872 self.ui.note(f + "\n")
872 873 try:
873 874 fctx = ctx[f]
874 875 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
875 876 changed)
876 877 m1.set(f, fctx.flags())
877 878 except (OSError, IOError):
878 879 if error:
879 880 self.ui.warn(_("trouble committing %s!\n") % f)
880 881 raise
881 882 else:
882 883 removed.append(f)
883 884
884 885 # update manifest
885 886 m1.update(new)
886 887 removed = [f for f in sorted(removed) if f in m1 or f in m2]
887 888 drop = [f for f in removed if f in m1]
888 889 for f in drop:
889 890 del m1[f]
890 891 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
891 892 p2.manifestnode(), (new, drop))
892 893
893 894 # update changelog
894 895 self.changelog.delayupdate()
895 896 n = self.changelog.add(mn, changed + removed, ctx.description(),
896 897 trp, p1.node(), p2.node(),
897 898 user, ctx.date(), ctx.extra().copy())
898 899 p = lambda: self.changelog.writepending() and self.root or ""
899 900 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
900 901 parent2=xp2, pending=p)
901 902 self.changelog.finalize(trp)
902 903 tr.close()
903 904
904 905 if self.branchcache:
905 906 self.branchtags()
906 907
907 908 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
908 909 return n
909 910 finally:
910 911 del tr
911 912 lock.release()
912 913
914 def destroyed(self):
915 '''Inform the repository that nodes have been destroyed.
916 Intended for use by strip and rollback, so there's a common
917 place for anything that has to be done after destroying history.'''
918 # Do nothing for now: this is a placeholder that will be used
919 # when we add tag caching.
920 # XXX it might be nice if we could take the list of destroyed
921 # nodes, but I don't see an easy way for rollback() to do that
922 pass
923
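
rollback() above now calls self.destroyed() once history is gone, and strip is expected to do the same. An extension maintaining its own derived cache could hook the method, e.g. via extensions.wrapfunction (a sketch; the cache attribute is hypothetical):

    from mercurial import extensions, localrepo

    def _destroyed(orig, repo):
        repo._mycache = None   # hypothetical extension-owned cache
        return orig(repo)

    def uisetup(ui):
        extensions.wrapfunction(localrepo.localrepository, 'destroyed',
                                _destroyed)
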
913 924 def walk(self, match, node=None):
914 925 '''
915 926 walk recursively through the directory tree or a given
916 927 changeset, finding all files matched by the match
917 928 function
918 929 '''
919 930 return self[node].walk(match)
920 931
921 932 def status(self, node1='.', node2=None, match=None,
922 933 ignored=False, clean=False, unknown=False):
923 934 """return status of files between two nodes, or between a node and the working directory
924 935
925 936 If node1 is None, use the first dirstate parent instead.
926 937 If node2 is None, compare node1 with working directory.
927 938 """
928 939
929 940 def mfmatches(ctx):
930 941 mf = ctx.manifest().copy()
931 942 for fn in mf.keys():
932 943 if not match(fn):
933 944 del mf[fn]
934 945 return mf
935 946
936 947 if isinstance(node1, context.changectx):
937 948 ctx1 = node1
938 949 else:
939 950 ctx1 = self[node1]
940 951 if isinstance(node2, context.changectx):
941 952 ctx2 = node2
942 953 else:
943 954 ctx2 = self[node2]
944 955
945 956 working = ctx2.rev() is None
946 957 parentworking = working and ctx1 == self['.']
947 958 match = match or match_.always(self.root, self.getcwd())
948 959 listignored, listclean, listunknown = ignored, clean, unknown
949 960
950 961 # load earliest manifest first for caching reasons
951 962 if not working and ctx2.rev() < ctx1.rev():
952 963 ctx2.manifest()
953 964
954 965 if not parentworking:
955 966 def bad(f, msg):
956 967 if f not in ctx1:
957 968 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
958 969 match.bad = bad
959 970
960 971 if working: # we need to scan the working dir
961 972 s = self.dirstate.status(match, listignored, listclean, listunknown)
962 973 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
963 974
964 975 # check for any possibly clean files
965 976 if parentworking and cmp:
966 977 fixup = []
967 978 # do a full compare of any files that might have changed
968 979 for f in sorted(cmp):
969 980 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
970 981 or ctx1[f].cmp(ctx2[f].data())):
971 982 modified.append(f)
972 983 else:
973 984 fixup.append(f)
974 985
975 986 if listclean:
976 987 clean += fixup
977 988
978 989 # update dirstate for files that are actually clean
979 990 if fixup:
980 991 try:
981 992 # updating the dirstate is optional
982 993 # so we don't wait on the lock
983 994 wlock = self.wlock(False)
984 995 try:
985 996 for f in fixup:
986 997 self.dirstate.normal(f)
987 998 finally:
988 999 wlock.release()
989 1000 except error.LockError:
990 1001 pass
991 1002
992 1003 if not parentworking:
993 1004 mf1 = mfmatches(ctx1)
994 1005 if working:
995 1006 # we are comparing working dir against non-parent
996 1007 # generate a pseudo-manifest for the working dir
997 1008 mf2 = mfmatches(self['.'])
998 1009 for f in cmp + modified + added:
999 1010 mf2[f] = None
1000 1011 mf2.set(f, ctx2.flags(f))
1001 1012 for f in removed:
1002 1013 if f in mf2:
1003 1014 del mf2[f]
1004 1015 else:
1005 1016 # we are comparing two revisions
1006 1017 deleted, unknown, ignored = [], [], []
1007 1018 mf2 = mfmatches(ctx2)
1008 1019
1009 1020 modified, added, clean = [], [], []
1010 1021 for fn in mf2:
1011 1022 if fn in mf1:
1012 1023 if (mf1.flags(fn) != mf2.flags(fn) or
1013 1024 (mf1[fn] != mf2[fn] and
1014 1025 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1015 1026 modified.append(fn)
1016 1027 elif listclean:
1017 1028 clean.append(fn)
1018 1029 del mf1[fn]
1019 1030 else:
1020 1031 added.append(fn)
1021 1032 removed = mf1.keys()
1022 1033
1023 1034 r = modified, added, removed, deleted, unknown, ignored, clean
1024 1035 [l.sort() for l in r]
1025 1036 return r
1026 1037
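
A usage sketch: status() always returns seven lists, in the order assembled above.

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
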
1027 1038 def add(self, list):
1028 1039 wlock = self.wlock()
1029 1040 try:
1030 1041 rejected = []
1031 1042 for f in list:
1032 1043 p = self.wjoin(f)
1033 1044 try:
1034 1045 st = os.lstat(p)
1035 1046 except:
1036 1047 self.ui.warn(_("%s does not exist!\n") % f)
1037 1048 rejected.append(f)
1038 1049 continue
1039 1050 if st.st_size > 10000000:
1040 1051 self.ui.warn(_("%s: files over 10MB may cause memory and"
1041 1052 " performance problems\n"
1042 1053 "(use 'hg revert %s' to unadd the file)\n")
1043 1054 % (f, f))
1044 1055 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1045 1056 self.ui.warn(_("%s not added: only files and symlinks "
1046 1057 "supported currently\n") % f)
1047 1058 rejected.append(p)
1048 1059 elif self.dirstate[f] in 'amn':
1049 1060 self.ui.warn(_("%s already tracked!\n") % f)
1050 1061 elif self.dirstate[f] == 'r':
1051 1062 self.dirstate.normallookup(f)
1052 1063 else:
1053 1064 self.dirstate.add(f)
1054 1065 return rejected
1055 1066 finally:
1056 1067 wlock.release()
1057 1068
1058 1069 def forget(self, list):
1059 1070 wlock = self.wlock()
1060 1071 try:
1061 1072 for f in list:
1062 1073 if self.dirstate[f] != 'a':
1063 1074 self.ui.warn(_("%s not added!\n") % f)
1064 1075 else:
1065 1076 self.dirstate.forget(f)
1066 1077 finally:
1067 1078 wlock.release()
1068 1079
1069 1080 def remove(self, list, unlink=False):
1070 1081 if unlink:
1071 1082 for f in list:
1072 1083 try:
1073 1084 util.unlink(self.wjoin(f))
1074 1085 except OSError, inst:
1075 1086 if inst.errno != errno.ENOENT:
1076 1087 raise
1077 1088 wlock = self.wlock()
1078 1089 try:
1079 1090 for f in list:
1080 1091 if unlink and os.path.exists(self.wjoin(f)):
1081 1092 self.ui.warn(_("%s still exists!\n") % f)
1082 1093 elif self.dirstate[f] == 'a':
1083 1094 self.dirstate.forget(f)
1084 1095 elif f not in self.dirstate:
1085 1096 self.ui.warn(_("%s not tracked!\n") % f)
1086 1097 else:
1087 1098 self.dirstate.remove(f)
1088 1099 finally:
1089 1100 wlock.release()
1090 1101
1091 1102 def undelete(self, list):
1092 1103 manifests = [self.manifest.read(self.changelog.read(p)[0])
1093 1104 for p in self.dirstate.parents() if p != nullid]
1094 1105 wlock = self.wlock()
1095 1106 try:
1096 1107 for f in list:
1097 1108 if self.dirstate[f] != 'r':
1098 1109 self.ui.warn(_("%s not removed!\n") % f)
1099 1110 else:
1100 1111 m = f in manifests[0] and manifests[0] or manifests[1]
1101 1112 t = self.file(f).read(m[f])
1102 1113 self.wwrite(f, t, m.flags(f))
1103 1114 self.dirstate.normal(f)
1104 1115 finally:
1105 1116 wlock.release()
1106 1117
1107 1118 def copy(self, source, dest):
1108 1119 p = self.wjoin(dest)
1109 1120 if not (os.path.exists(p) or os.path.islink(p)):
1110 1121 self.ui.warn(_("%s does not exist!\n") % dest)
1111 1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1112 1123 self.ui.warn(_("copy failed: %s is not a file or a "
1113 1124 "symbolic link\n") % dest)
1114 1125 else:
1115 1126 wlock = self.wlock()
1116 1127 try:
1117 1128 if self.dirstate[dest] in '?r':
1118 1129 self.dirstate.add(dest)
1119 1130 self.dirstate.copy(source, dest)
1120 1131 finally:
1121 1132 wlock.release()
1122 1133
1123 1134 def heads(self, start=None):
1124 1135 heads = self.changelog.heads(start)
1125 1136 # sort the output in rev descending order
1126 1137 heads = [(-self.changelog.rev(h), h) for h in heads]
1127 1138 return [n for (r, n) in sorted(heads)]
1128 1139
1129 1140 def branchheads(self, branch=None, start=None, closed=False):
1130 1141 if branch is None:
1131 1142 branch = self[None].branch()
1132 1143 branches = self.branchmap()
1133 1144 if branch not in branches:
1134 1145 return []
1135 1146 bheads = branches[branch]
1136 1147 # the cache returns heads ordered lowest to highest
1137 1148 bheads.reverse()
1138 1149 if start is not None:
1139 1150 # filter out the heads that cannot be reached from startrev
1140 1151 bheads = self.changelog.nodesbetween([start], bheads)[2]
1141 1152 if not closed:
1142 1153 bheads = [h for h in bheads if
1143 1154 ('close' not in self.changelog.read(h)[5])]
1144 1155 return bheads
1145 1156
1146 1157 def branches(self, nodes):
1147 1158 if not nodes:
1148 1159 nodes = [self.changelog.tip()]
1149 1160 b = []
1150 1161 for n in nodes:
1151 1162 t = n
1152 1163 while 1:
1153 1164 p = self.changelog.parents(n)
1154 1165 if p[1] != nullid or p[0] == nullid:
1155 1166 b.append((t, n, p[0], p[1]))
1156 1167 break
1157 1168 n = p[0]
1158 1169 return b
1159 1170
1160 1171 def between(self, pairs):
1161 1172 r = []
1162 1173
1163 1174 for top, bottom in pairs:
1164 1175 n, l, i = top, [], 0
1165 1176 f = 1
1166 1177
1167 1178 while n != bottom and n != nullid:
1168 1179 p = self.changelog.parents(n)[0]
1169 1180 if i == f:
1170 1181 l.append(n)
1171 1182 f = f * 2
1172 1183 n = p
1173 1184 i += 1
1174 1185
1175 1186 r.append(l)
1176 1187
1177 1188 return r
1178 1189
1179 1190 def findincoming(self, remote, base=None, heads=None, force=False):
1180 1191 """Return list of roots of the subsets of missing nodes from remote
1181 1192
1182 1193 If base dict is specified, assume that these nodes and their parents
1183 1194 exist on the remote side and that no child of a node of base exists
1184 1195 in both remote and self.
1185 1196 Furthermore, base will be updated to include the nodes that exist
1186 1197 in both self and remote but whose children do not exist in both.
1187 1198 If a list of heads is specified, return only nodes which are heads
1188 1199 or ancestors of these heads.
1189 1200
1190 1201 All the ancestors of base are in self and in remote.
1191 1202 All the descendants of the list returned are missing in self.
1192 1203 (and so we know that the rest of the nodes are missing in remote, see
1193 1204 outgoing)
1194 1205 """
1195 1206 return self.findcommonincoming(remote, base, heads, force)[1]
1196 1207
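
findincoming() is thus a thin wrapper returning only the middle element of findcommonincoming()'s triple, assuming 'remote' is a repository peer:

    # (common, missing roots, heads) -- findincoming() is element [1].
    common, fetch, rheads = repo.findcommonincoming(remote)
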
1197 1208 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1198 1209 """Return a tuple (common, missing roots, heads) used to identify
1199 1210 missing nodes from remote.
1200 1211
1201 1212 If base dict is specified, assume that these nodes and their parents
1202 1213 exist on the remote side and that no child of a node of base exists
1203 1214 in both remote and self.
1204 1215 Furthermore, base will be updated to include the nodes that exist
1205 1216 in both self and remote but whose children do not exist in both.
1206 1217 If a list of heads is specified, return only nodes which are heads
1207 1218 or ancestors of these heads.
1208 1219
1209 1220 All the ancestors of base are in self and in remote.
1210 1221 """
1211 1222 m = self.changelog.nodemap
1212 1223 search = []
1213 1224 fetch = set()
1214 1225 seen = set()
1215 1226 seenbranch = set()
1216 1227 if base is None:
1217 1228 base = {}
1218 1229
1219 1230 if not heads:
1220 1231 heads = remote.heads()
1221 1232
1222 1233 if self.changelog.tip() == nullid:
1223 1234 base[nullid] = 1
1224 1235 if heads != [nullid]:
1225 1236 return [nullid], [nullid], list(heads)
1226 1237 return [nullid], [], []
1227 1238
1228 1239 # assume we're closer to the tip than the root
1229 1240 # and start by examining the heads
1230 1241 self.ui.status(_("searching for changes\n"))
1231 1242
1232 1243 unknown = []
1233 1244 for h in heads:
1234 1245 if h not in m:
1235 1246 unknown.append(h)
1236 1247 else:
1237 1248 base[h] = 1
1238 1249
1239 1250 heads = unknown
1240 1251 if not unknown:
1241 1252 return base.keys(), [], []
1242 1253
1243 1254 req = set(unknown)
1244 1255 reqcnt = 0
1245 1256
1246 1257 # search through remote branches
1247 1258 # a 'branch' here is a linear segment of history, with four parts:
1248 1259 # head, root, first parent, second parent
1249 1260 # (a branch always has two parents (or none) by definition)
1250 1261 unknown = remote.branches(unknown)
1251 1262 while unknown:
1252 1263 r = []
1253 1264 while unknown:
1254 1265 n = unknown.pop(0)
1255 1266 if n[0] in seen:
1256 1267 continue
1257 1268
1258 1269 self.ui.debug(_("examining %s:%s\n")
1259 1270 % (short(n[0]), short(n[1])))
1260 1271 if n[0] == nullid: # found the end of the branch
1261 1272 pass
1262 1273 elif n in seenbranch:
1263 1274 self.ui.debug(_("branch already found\n"))
1264 1275 continue
1265 1276 elif n[1] and n[1] in m: # do we know the base?
1266 1277 self.ui.debug(_("found incomplete branch %s:%s\n")
1267 1278 % (short(n[0]), short(n[1])))
1268 1279 search.append(n[0:2]) # schedule branch range for scanning
1269 1280 seenbranch.add(n)
1270 1281 else:
1271 1282 if n[1] not in seen and n[1] not in fetch:
1272 1283 if n[2] in m and n[3] in m:
1273 1284 self.ui.debug(_("found new changeset %s\n") %
1274 1285 short(n[1]))
1275 1286 fetch.add(n[1]) # earliest unknown
1276 1287 for p in n[2:4]:
1277 1288 if p in m:
1278 1289 base[p] = 1 # latest known
1279 1290
1280 1291 for p in n[2:4]:
1281 1292 if p not in req and p not in m:
1282 1293 r.append(p)
1283 1294 req.add(p)
1284 1295 seen.add(n[0])
1285 1296
1286 1297 if r:
1287 1298 reqcnt += 1
1288 1299 self.ui.debug(_("request %d: %s\n") %
1289 1300 (reqcnt, " ".join(map(short, r))))
1290 1301 for p in xrange(0, len(r), 10):
1291 1302 for b in remote.branches(r[p:p+10]):
1292 1303 self.ui.debug(_("received %s:%s\n") %
1293 1304 (short(b[0]), short(b[1])))
1294 1305 unknown.append(b)
1295 1306
1296 1307 # do binary search on the branches we found
1297 1308 while search:
1298 1309 newsearch = []
1299 1310 reqcnt += 1
1300 1311 for n, l in zip(search, remote.between(search)):
1301 1312 l.append(n[1])
1302 1313 p = n[0]
1303 1314 f = 1
1304 1315 for i in l:
1305 1316 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1306 1317 if i in m:
1307 1318 if f <= 2:
1308 1319 self.ui.debug(_("found new branch changeset %s\n") %
1309 1320 short(p))
1310 1321 fetch.add(p)
1311 1322 base[i] = 1
1312 1323 else:
1313 1324 self.ui.debug(_("narrowed branch search to %s:%s\n")
1314 1325 % (short(p), short(i)))
1315 1326 newsearch.append((p, i))
1316 1327 break
1317 1328 p, f = i, f * 2
1318 1329 search = newsearch
1319 1330
1320 1331 # sanity check our fetch list
1321 1332 for f in fetch:
1322 1333 if f in m:
1323 1334 raise error.RepoError(_("already have changeset ")
1324 1335 + short(f[:4]))
1325 1336
1326 1337 if base.keys() == [nullid]:
1327 1338 if force:
1328 1339 self.ui.warn(_("warning: repository is unrelated\n"))
1329 1340 else:
1330 1341 raise util.Abort(_("repository is unrelated"))
1331 1342
1332 1343 self.ui.debug(_("found new changesets starting at ") +
1333 1344 " ".join([short(f) for f in fetch]) + "\n")
1334 1345
1335 1346 self.ui.debug(_("%d total queries\n") % reqcnt)
1336 1347
1337 1348 return base.keys(), list(fetch), heads
1338 1349
1339 1350 def findoutgoing(self, remote, base=None, heads=None, force=False):
1340 1351 """Return list of nodes that are roots of subsets not in remote
1341 1352
1342 1353 If base dict is specified, assume that these nodes and their parents
1343 1354 exist on the remote side.
1344 1355 If a list of heads is specified, return only nodes which are heads
1345 1356 or ancestors of these heads, and return a second element which
1346 1357 contains all remote heads which get new children.
1347 1358 """
1348 1359 if base is None:
1349 1360 base = {}
1350 1361 self.findincoming(remote, base, heads, force=force)
1351 1362
1352 1363 self.ui.debug(_("common changesets up to ")
1353 1364 + " ".join(map(short, base.keys())) + "\n")
1354 1365
1355 1366 remain = set(self.changelog.nodemap)
1356 1367
1357 1368 # prune everything remote has from the tree
1358 1369 remain.remove(nullid)
1359 1370 remove = base.keys()
1360 1371 while remove:
1361 1372 n = remove.pop(0)
1362 1373 if n in remain:
1363 1374 remain.remove(n)
1364 1375 for p in self.changelog.parents(n):
1365 1376 remove.append(p)
1366 1377
1367 1378 # find every node whose parents have been pruned
1368 1379 subset = []
1369 1380 # find every remote head that will get new children
1370 1381 updated_heads = set()
1371 1382 for n in remain:
1372 1383 p1, p2 = self.changelog.parents(n)
1373 1384 if p1 not in remain and p2 not in remain:
1374 1385 subset.append(n)
1375 1386 if heads:
1376 1387 if p1 in heads:
1377 1388 updated_heads.add(p1)
1378 1389 if p2 in heads:
1379 1390 updated_heads.add(p2)
1380 1391
1381 1392 # this is the set of all roots we have to push
1382 1393 if heads:
1383 1394 return subset, list(updated_heads)
1384 1395 else:
1385 1396 return subset
1386 1397
1387 1398 def pull(self, remote, heads=None, force=False):
1388 1399 lock = self.lock()
1389 1400 try:
1390 1401 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1391 1402 force=force)
1392 1403 if fetch == [nullid]:
1393 1404 self.ui.status(_("requesting all changes\n"))
1394 1405
1395 1406 if not fetch:
1396 1407 self.ui.status(_("no changes found\n"))
1397 1408 return 0
1398 1409
1399 1410 if heads is None and remote.capable('changegroupsubset'):
1400 1411 heads = rheads
1401 1412
1402 1413 if heads is None:
1403 1414 cg = remote.changegroup(fetch, 'pull')
1404 1415 else:
1405 1416 if not remote.capable('changegroupsubset'):
1406 1417 raise util.Abort(_("Partial pull cannot be done because "
1407 1418 "other repository doesn't support "
1408 1419 "changegroupsubset."))
1409 1420 cg = remote.changegroupsubset(fetch, heads, 'pull')
1410 1421 return self.addchangegroup(cg, 'pull', remote.url())
1411 1422 finally:
1412 1423 lock.release()
1413 1424
1414 1425 def push(self, remote, force=False, revs=None):
1415 1426 # there are two ways to push to remote repo:
1416 1427 #
1417 1428 # addchangegroup assumes local user can lock remote
1418 1429 # repo (local filesystem, old ssh servers).
1419 1430 #
1420 1431 # unbundle assumes local user cannot lock remote repo (new ssh
1421 1432 # servers, http servers).
1422 1433
1423 1434 if remote.capable('unbundle'):
1424 1435 return self.push_unbundle(remote, force, revs)
1425 1436 return self.push_addchangegroup(remote, force, revs)
1426 1437
1427 1438 def prepush(self, remote, force, revs):
1428 1439 common = {}
1429 1440 remote_heads = remote.heads()
1430 1441 inc = self.findincoming(remote, common, remote_heads, force=force)
1431 1442
1432 1443 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1433 1444 if revs is not None:
1434 1445 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1435 1446 else:
1436 1447 bases, heads = update, self.changelog.heads()
1437 1448
1438 1449 def checkbranch(lheads, rheads, updatelh):
1439 1450 '''
1440 1451 check whether there are more local heads than remote heads on
1441 1452 a specific branch.
1442 1453
1443 1454 lheads: local branch heads
1444 1455 rheads: remote branch heads
1445 1456 updatelh: outgoing local branch heads
1446 1457 '''
1447 1458
1448 1459 warn = 0
1449 1460
1450 1461 if not revs and len(lheads) > len(rheads):
1451 1462 warn = 1
1452 1463 else:
1453 1464 updatelheads = [self.changelog.heads(x, lheads)
1454 1465 for x in updatelh]
1455 1466 newheads = set(sum(updatelheads, [])) & set(lheads)
1456 1467
1457 1468 if not newheads:
1458 1469 return True
1459 1470
1460 1471 for r in rheads:
1461 1472 if r in self.changelog.nodemap:
1462 1473 desc = self.changelog.heads(r, heads)
1463 1474 l = [h for h in heads if h in desc]
1464 1475 if not l:
1465 1476 newheads.add(r)
1466 1477 else:
1467 1478 newheads.add(r)
1468 1479 if len(newheads) > len(rheads):
1469 1480 warn = 1
1470 1481
1471 1482 if warn:
1472 1483 if not rheads: # new branch requires --force
1473 1484 self.ui.warn(_("abort: push creates new"
1474 1485 " remote branch '%s'!\n") %
1475 1486 self[updatelh[0]].branch())
1476 1487 else:
1477 1488 self.ui.warn(_("abort: push creates new remote heads!\n"))
1478 1489
1479 1490 self.ui.status(_("(did you forget to merge?"
1480 1491 " use push -f to force)\n"))
1481 1492 return False
1482 1493 return True
1483 1494
1484 1495 if not bases:
1485 1496 self.ui.status(_("no changes found\n"))
1486 1497 return None, 1
1487 1498 elif not force:
1488 1499 # Check for each named branch if we're creating new remote heads.
1489 1500 # To be a remote head after push, node must be either:
1490 1501 # - unknown locally
1491 1502 # - a local outgoing head descended from update
1492 1503 # - a remote head that's known locally and not
1493 1504 # ancestral to an outgoing head
1494 1505 #
1495 1506 # New named branches cannot be created without --force.
1496 1507
1497 1508 if remote_heads != [nullid]:
1498 1509 if remote.capable('branchmap'):
1499 1510 localhds = {}
1500 1511 if not revs:
1501 1512 localhds = self.branchmap()
1502 1513 else:
1503 1514 for n in heads:
1504 1515 branch = self[n].branch()
1505 1516 if branch in localhds:
1506 1517 localhds[branch].append(n)
1507 1518 else:
1508 1519 localhds[branch] = [n]
1509 1520
1510 1521 remotehds = remote.branchmap()
1511 1522
1512 1523 for lh in localhds:
1513 1524 if lh in remotehds:
1514 1525 rheads = remotehds[lh]
1515 1526 else:
1516 1527 rheads = []
1517 1528 lheads = localhds[lh]
1518 1529 updatelh = [upd for upd in update
1519 1530 if self[upd].branch() == lh]
1520 1531 if not updatelh:
1521 1532 continue
1522 1533 if not checkbranch(lheads, rheads, updatelh):
1523 1534 return None, 0
1524 1535 else:
1525 1536 if not checkbranch(heads, remote_heads, update):
1526 1537 return None, 0
1527 1538
1528 1539 if inc:
1529 1540 self.ui.warn(_("note: unsynced remote changes!\n"))
1530 1541
1531 1542
1532 1543 if revs is None:
1533 1544 # use the fast path, no race possible on push
1534 1545 cg = self._changegroup(common.keys(), 'push')
1535 1546 else:
1536 1547 cg = self.changegroupsubset(update, revs, 'push')
1537 1548 return cg, remote_heads
1538 1549
1539 1550 def push_addchangegroup(self, remote, force, revs):
1540 1551 lock = remote.lock()
1541 1552 try:
1542 1553 ret = self.prepush(remote, force, revs)
1543 1554 if ret[0] is not None:
1544 1555 cg, remote_heads = ret
1545 1556 return remote.addchangegroup(cg, 'push', self.url())
1546 1557 return ret[1]
1547 1558 finally:
1548 1559 lock.release()
1549 1560
1550 1561 def push_unbundle(self, remote, force, revs):
1551 1562 # local repo finds heads on server, finds out what revs it
1552 1563 # must push. once revs transferred, if server finds it has
1553 1564 # different heads (someone else won commit/push race), server
1554 1565 # aborts.
1555 1566
1556 1567 ret = self.prepush(remote, force, revs)
1557 1568 if ret[0] is not None:
1558 1569 cg, remote_heads = ret
1559 1570 if force: remote_heads = ['force']
1560 1571 return remote.unbundle(cg, remote_heads, 'push')
1561 1572 return ret[1]
1562 1573
1563 1574 def changegroupinfo(self, nodes, source):
1564 1575 if self.ui.verbose or source == 'bundle':
1565 1576 self.ui.status(_("%d changesets found\n") % len(nodes))
1566 1577 if self.ui.debugflag:
1567 1578 self.ui.debug(_("list of changesets:\n"))
1568 1579 for node in nodes:
1569 1580 self.ui.debug("%s\n" % hex(node))
1570 1581
1571 1582 def changegroupsubset(self, bases, heads, source, extranodes=None):
1572 1583 """This function generates a changegroup consisting of all the nodes
1573 1584 that are descendants of any of the bases, and ancestors of any of
1574 1585 the heads.
1575 1586
1576 1587 It is fairly complex as determining which filenodes and which
1577 1588 manifest nodes need to be included for the changeset to be complete
1578 1589 is non-trivial.
1579 1590
1580 1591 Another wrinkle is doing the reverse, figuring out which changeset in
1581 1592 the changegroup a particular filenode or manifestnode belongs to.
1582 1593
1583 1594 The caller can specify some nodes that must be included in the
1584 1595 changegroup using the extranodes argument. It should be a dict
1585 1596 where the keys are the filenames (or 1 for the manifest), and the
1586 1597 values are lists of (node, linknode) tuples, where node is a wanted
1587 1598 node and linknode is the changelog node that should be transmitted as
1588 1599 the linkrev.
1589 1600 """
1590 1601
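
A sketch of the extranodes shape described in the docstring above (all node values hypothetical):

    # filename (or 1 for the manifest) -> [(node, linknode), ...]
    extranodes = {
        'foo.c': [(filenode, linknode)],
        1:       [(manifestnode, linknode)],
    }
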
1591 1602 if extranodes is None:
1592 1603 # can we go through the fast path ?
1593 1604 heads.sort()
1594 1605 allheads = self.heads()
1595 1606 allheads.sort()
1596 1607 if heads == allheads:
1597 1608 common = []
1598 1609 # parents of bases are known from both sides
1599 1610 for n in bases:
1600 1611 for p in self.changelog.parents(n):
1601 1612 if p != nullid:
1602 1613 common.append(p)
1603 1614 return self._changegroup(common, source)
1604 1615
1605 1616 self.hook('preoutgoing', throw=True, source=source)
1606 1617
1607 1618 # Set up some initial variables
1608 1619 # Make it easy to refer to self.changelog
1609 1620 cl = self.changelog
1610 1621 # msng is short for missing - compute the list of changesets in this
1611 1622 # changegroup.
1612 1623 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1613 1624 self.changegroupinfo(msng_cl_lst, source)
1614 1625 # Some bases may turn out to be superfluous, and some heads may be
1615 1626 # too. nodesbetween will return the minimal set of bases and heads
1616 1627 # necessary to re-create the changegroup.
1617 1628
1618 1629 # Known heads are the list of heads that it is assumed the recipient
1619 1630 # of this changegroup will know about.
1620 1631 knownheads = set()
1621 1632 # We assume that all parents of bases are known heads.
1622 1633 for n in bases:
1623 1634 knownheads.update(cl.parents(n))
1624 1635 knownheads.discard(nullid)
1625 1636 knownheads = list(knownheads)
1626 1637 if knownheads:
1627 1638 # Now that we know what heads are known, we can compute which
1628 1639 # changesets are known. The recipient must know about all
1629 1640 # changesets required to reach the known heads from the null
1630 1641 # changeset.
1631 1642 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1632 1643 junk = None
1633 1644 # Transform the list into a set.
1634 1645 has_cl_set = set(has_cl_set)
1635 1646 else:
1636 1647 # If there were no known heads, the recipient cannot be assumed to
1637 1648 # know about any changesets.
1638 1649 has_cl_set = set()
1639 1650
1640 1651 # Make it easy to refer to self.manifest
1641 1652 mnfst = self.manifest
1642 1653 # We don't know which manifests are missing yet
1643 1654 msng_mnfst_set = {}
1644 1655 # Nor do we know which filenodes are missing.
1645 1656 msng_filenode_set = {}
1646 1657
1647 1658 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1648 1659 junk = None
1649 1660
1650 1661 # A changeset always belongs to itself, so the changenode lookup
1651 1662 # function for a changenode is identity.
1652 1663 def identity(x):
1653 1664 return x
1654 1665
1655 1666 # If we determine that a particular file or manifest node must be a
1656 1667 # node that the recipient of the changegroup will already have, we can
1657 1668 # also assume the recipient will have all the parents. This function
1658 1669 # prunes them from the set of missing nodes.
1659 1670 def prune_parents(revlog, hasset, msngset):
1660 1671 haslst = list(hasset)
1661 1672 haslst.sort(key=revlog.rev)
1662 1673 for node in haslst:
1663 1674 parentlst = [p for p in revlog.parents(node) if p != nullid]
1664 1675 while parentlst:
1665 1676 n = parentlst.pop()
1666 1677 if n not in hasset:
1667 1678 hasset.add(n)
1668 1679 p = [p for p in revlog.parents(n) if p != nullid]
1669 1680 parentlst.extend(p)
1670 1681 for n in hasset:
1671 1682 msngset.pop(n, None)
1672 1683
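The pruning idea can be sketched standalone with a plain dict of parents instead of a revlog; this is a toy illustration, not Mercurial API:

def prune_parents_toy(parents, hasset, msngset):
    # parents: node -> list of non-null parent nodes.
    # First grow hasset to include every ancestor of its members,
    # then drop each of those nodes from the missing set.
    worklist = list(hasset)
    while worklist:
        for p in parents.get(worklist.pop(), []):
            if p not in hasset:
                hasset.add(p)
                worklist.append(p)
    for n in list(hasset):
        msngset.pop(n, None)

missing = {'a': 'x', 'b': 'y', 'c': 'z'}
prune_parents_toy({'c': ['b'], 'b': ['a'], 'a': []}, set(['c']), missing)
# missing is now empty: 'a' and 'b' are ancestors of the known node 'c'.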
1673 1684 # This is a function generating function used to set up an environment
1674 1685 # for the inner function to execute in.
1675 1686 def manifest_and_file_collector(changedfileset):
1676 1687 # This is an information gathering function that gathers
1677 1688 # information from each changeset node that goes out as part of
1678 1689 # the changegroup. The information gathered is a list of which
1679 1690 # manifest nodes are potentially required (the recipient may
1680 1691 # already have them) and total list of all files which were
1681 1692 # changed in any changeset in the changegroup.
1682 1693 #
1683 1694 # We also remember the first changenode each manifest was
1684 1695 # referenced by, so we can later determine which changenode
1685 1696 # 'owns' the manifest.
1686 1697 def collect_manifests_and_files(clnode):
1687 1698 c = cl.read(clnode)
1688 1699 for f in c[3]:
1689 1700 # This is to make sure we only have one instance of each
1690 1701 # filename string for each filename.
1691 1702 changedfileset.setdefault(f, f)
1692 1703 msng_mnfst_set.setdefault(c[0], clnode)
1693 1704 return collect_manifests_and_files
1694 1705
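The collector here follows a simple closure pattern: the group generator calls the callback once per outgoing node, and the callback records what it sees in state captured from the enclosing scope. A generic sketch of that pattern (hypothetical names, not the revlog API):

def make_collector(seen):
    # `seen` plays the role of changedfileset above: the callback
    # dedupes and records every name it is shown.
    def collect(name):
        seen.setdefault(name, name)
    return collect

seen = {}
collect = make_collector(seen)
for f in ['a.txt', 'b.txt', 'a.txt']:
    collect(f)
assert sorted(seen) == ['a.txt', 'b.txt']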
1695 1706 # Figure out which manifest nodes (of the ones we think might be part
1696 1707 # of the changegroup) the recipient must know about and remove them
1697 1708 # from the changegroup.
1698 1709 def prune_manifests():
1699 1710 has_mnfst_set = set()
1700 1711 for n in msng_mnfst_set:
1701 1712 # If a 'missing' manifest thinks it belongs to a changenode
1702 1713 # the recipient is assumed to have, obviously the recipient
1703 1714 # must have that manifest.
1704 1715 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1705 1716 if linknode in has_cl_set:
1706 1717 has_mnfst_set.add(n)
1707 1718 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1708 1719
1709 1720 # Use the information collected in collect_manifests_and_files to say
1710 1721 # which changenode any manifestnode belongs to.
1711 1722 def lookup_manifest_link(mnfstnode):
1712 1723 return msng_mnfst_set[mnfstnode]
1713 1724
1714 1725 # A function generating function that sets up the initial environment
1715 1726 # for the inner function.
1716 1727 def filenode_collector(changedfiles):
1717 1728 next_rev = [0]
1718 1729 # This gathers information from each manifestnode included in the
1719 1730 # changegroup about which filenodes the manifest node references
1720 1731 # so we can include those in the changegroup too.
1721 1732 #
1722 1733 # It also remembers which changenode each filenode belongs to: a
1723 1734 # filenode is assumed to belong to the same changenode as the
1724 1735 # first manifest that references it.
1725 1736 def collect_msng_filenodes(mnfstnode):
1726 1737 r = mnfst.rev(mnfstnode)
1727 1738 if r == next_rev[0]:
1728 1739 # If the last rev we looked at was the one just previous,
1729 1740 # we only need to see a diff.
1730 1741 deltamf = mnfst.readdelta(mnfstnode)
1731 1742 # For each line in the delta
1732 1743 for f, fnode in deltamf.iteritems():
1733 1744 f = changedfiles.get(f, None)
1734 1745 # And if the file is in the list of files we care
1735 1746 # about.
1736 1747 if f is not None:
1737 1748 # Get the changenode this manifest belongs to
1738 1749 clnode = msng_mnfst_set[mnfstnode]
1739 1750 # Create the set of filenodes for the file if
1740 1751 # there isn't one already.
1741 1752 ndset = msng_filenode_set.setdefault(f, {})
1742 1753 # And set the filenode's changelog node to the
1743 1754 # manifest's if it hasn't been set already.
1744 1755 ndset.setdefault(fnode, clnode)
1745 1756 else:
1746 1757 # Otherwise we need a full manifest.
1747 1758 m = mnfst.read(mnfstnode)
1748 1759 # For every file we care about.
1749 1760 for f in changedfiles:
1750 1761 fnode = m.get(f, None)
1751 1762 # If it's in the manifest
1752 1763 if fnode is not None:
1753 1764 # See comments above.
1754 1765 clnode = msng_mnfst_set[mnfstnode]
1755 1766 ndset = msng_filenode_set.setdefault(f, {})
1756 1767 ndset.setdefault(fnode, clnode)
1757 1768 # Remember the revision we hope to see next.
1758 1769 next_rev[0] = r + 1
1759 1770 return collect_msng_filenodes
1760 1771
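The next_rev trick above is an optimization: when manifest revisions arrive in consecutive order, a delta against the previous revision is enough, and the full (more expensive) read is only needed after a gap. A generic sketch of that decision, with hypothetical reader callables:

def walk_with_deltas(revs, read_full, read_delta):
    # Mirror of the next_rev logic above: consecutive revisions get
    # the cheap delta read, anything else the expensive full read.
    next_rev = 0
    for r in revs:
        if r == next_rev:
            yield read_delta(r)
        else:
            yield read_full(r)
        next_rev = r + 1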
1761 1772 # We have a list of filenodes we think we need for a file; let's remove
1762 1773 # all those we know the recipient must have.
1763 1774 def prune_filenodes(f, filerevlog):
1764 1775 msngset = msng_filenode_set[f]
1765 1776 hasset = set()
1766 1777 # If a 'missing' filenode thinks it belongs to a changenode we
1767 1778 # assume the recipient must have, then the recipient must have
1768 1779 # that filenode.
1769 1780 for n in msngset:
1770 1781 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1771 1782 if clnode in has_cl_set:
1772 1783 hasset.add(n)
1773 1784 prune_parents(filerevlog, hasset, msngset)
1774 1785
1775 1786 # A function generating function that sets up a context for the
1776 1787 # inner function.
1777 1788 def lookup_filenode_link_func(fname):
1778 1789 msngset = msng_filenode_set[fname]
1779 1790 # Look up the changenode the filenode belongs to.
1780 1791 def lookup_filenode_link(fnode):
1781 1792 return msngset[fnode]
1782 1793 return lookup_filenode_link
1783 1794
1784 1795 # Add the nodes that were explicitly requested.
1785 1796 def add_extra_nodes(name, nodes):
1786 1797 if not extranodes or name not in extranodes:
1787 1798 return
1788 1799
1789 1800 for node, linknode in extranodes[name]:
1790 1801 if node not in nodes:
1791 1802 nodes[node] = linknode
1792 1803
1793 1804 # Now that we have all these utility functions to help out and
1794 1805 # logically divide up the task, generate the group.
1795 1806 def gengroup():
1796 1807 # The set of changed files starts empty.
1797 1808 changedfiles = {}
1798 1809 # Create a changenode group generator that will call our functions
1799 1810 # back to look up the owning changenode and collect information.
1800 1811 group = cl.group(msng_cl_lst, identity,
1801 1812 manifest_and_file_collector(changedfiles))
1802 1813 for chnk in group:
1803 1814 yield chnk
1804 1815
1805 1816 # The list of manifests has been collected by the generator
1806 1817 # calling our functions back.
1807 1818 prune_manifests()
1808 1819 add_extra_nodes(1, msng_mnfst_set)
1809 1820 msng_mnfst_lst = msng_mnfst_set.keys()
1810 1821 # Sort the manifestnodes by revision number.
1811 1822 msng_mnfst_lst.sort(key=mnfst.rev)
1812 1823 # Create a generator for the manifestnodes that calls our lookup
1813 1824 # and data collection functions back.
1814 1825 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1815 1826 filenode_collector(changedfiles))
1816 1827 for chnk in group:
1817 1828 yield chnk
1818 1829
1819 1830 # These are no longer needed, dereference and toss the memory for
1820 1831 # them.
1821 1832 msng_mnfst_lst = None
1822 1833 msng_mnfst_set.clear()
1823 1834
1824 1835 if extranodes:
1825 1836 for fname in extranodes:
1826 1837 if isinstance(fname, int):
1827 1838 continue
1828 1839 msng_filenode_set.setdefault(fname, {})
1829 1840 changedfiles[fname] = 1
1830 1841 # Go through all our files in order sorted by name.
1831 1842 for fname in sorted(changedfiles):
1832 1843 filerevlog = self.file(fname)
1833 1844 if not len(filerevlog):
1834 1845 raise util.Abort(_("empty or missing revlog for %s") % fname)
1835 1846 # Toss out the filenodes that the recipient isn't really
1836 1847 # missing.
1837 1848 if fname in msng_filenode_set:
1838 1849 prune_filenodes(fname, filerevlog)
1839 1850 add_extra_nodes(fname, msng_filenode_set[fname])
1840 1851 msng_filenode_lst = msng_filenode_set[fname].keys()
1841 1852 else:
1842 1853 msng_filenode_lst = []
1843 1854 # If any filenodes are left, generate the group for them,
1844 1855 # otherwise don't bother.
1845 1856 if len(msng_filenode_lst) > 0:
1846 1857 yield changegroup.chunkheader(len(fname))
1847 1858 yield fname
1848 1859 # Sort the filenodes by their revision number.
1849 1860 msng_filenode_lst.sort(key=filerevlog.rev)
1850 1861 # Create a group generator and only pass in a changenode
1851 1862 # lookup function as we need to collect no information
1852 1863 # from filenodes.
1853 1864 group = filerevlog.group(msng_filenode_lst,
1854 1865 lookup_filenode_link_func(fname))
1855 1866 for chnk in group:
1856 1867 yield chnk
1857 1868 if fname in msng_filenode_set:
1858 1869 # Don't need this anymore, toss it to free memory.
1859 1870 del msng_filenode_set[fname]
1860 1871 # Signal that no more groups are left.
1861 1872 yield changegroup.closechunk()
1862 1873
1863 1874 if msng_cl_lst:
1864 1875 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1865 1876
1866 1877 return util.chunkbuffer(gengroup())
1867 1878
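The stream returned by util.chunkbuffer() is framed as chunks. Assuming the HG10 framing (each chunk starts with a 4-byte big-endian length that counts the length field itself, and a zero-length chunk closes a group), a minimal reader could look like this sketch:

import struct

def read_group(stream):
    # Yield the payload of each chunk until the closechunk() marker.
    while True:
        length = struct.unpack(">l", stream.read(4))[0]
        if length <= 4:
            break  # zero-length chunk: end of this group
        yield stream.read(length - 4)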
1868 1879 def changegroup(self, basenodes, source):
1869 1880 # to avoid a race we use changegroupsubset() (issue1320)
1870 1881 return self.changegroupsubset(basenodes, self.heads(), source)
1871 1882
1872 1883 def _changegroup(self, common, source):
1873 1884 """Generate a changegroup of all nodes that we have that a recipient
1874 1885 doesn't.
1875 1886
1876 1887 This is much easier than the previous function as we can assume that
1877 1888 the recipient has any changenode we aren't sending them.
1878 1889
1879 1890 common is the set of common nodes between remote and self"""
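As a toy model of what cl.findmissing(common) computes: everything reachable from our heads minus everything reachable from the common nodes. A hypothetical sketch over a plain ancestor function, not the changelog API:

def findmissing_toy(ancestors_of, our_heads, common):
    # ancestors_of(n) is assumed to return n plus all of n's ancestors.
    known = set()
    for n in common:
        known.update(ancestors_of(n))
    have = set()
    for h in our_heads:
        have.update(ancestors_of(h))
    return have - known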
1880 1891
1881 1892 self.hook('preoutgoing', throw=True, source=source)
1882 1893
1883 1894 cl = self.changelog
1884 1895 nodes = cl.findmissing(common)
1885 1896 revset = set([cl.rev(n) for n in nodes])
1886 1897 self.changegroupinfo(nodes, source)
1887 1898
1888 1899 def identity(x):
1889 1900 return x
1890 1901
1891 1902 def gennodelst(log):
1892 1903 for r in log:
1893 1904 if log.linkrev(r) in revset:
1894 1905 yield log.node(r)
1895 1906
1896 1907 def changed_file_collector(changedfileset):
1897 1908 def collect_changed_files(clnode):
1898 1909 c = cl.read(clnode)
1899 1910 changedfileset.update(c[3])
1900 1911 return collect_changed_files
1901 1912
1902 1913 def lookuprevlink_func(revlog):
1903 1914 def lookuprevlink(n):
1904 1915 return cl.node(revlog.linkrev(revlog.rev(n)))
1905 1916 return lookuprevlink
1906 1917
1907 1918 def gengroup():
1908 1919 # construct a list of all changed files
1909 1920 changedfiles = set()
1910 1921
1911 1922 for chnk in cl.group(nodes, identity,
1912 1923 changed_file_collector(changedfiles)):
1913 1924 yield chnk
1914 1925
1915 1926 mnfst = self.manifest
1916 1927 nodeiter = gennodelst(mnfst)
1917 1928 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1918 1929 yield chnk
1919 1930
1920 1931 for fname in sorted(changedfiles):
1921 1932 filerevlog = self.file(fname)
1922 1933 if not len(filerevlog):
1923 1934 raise util.Abort(_("empty or missing revlog for %s") % fname)
1924 1935 nodeiter = gennodelst(filerevlog)
1925 1936 nodeiter = list(nodeiter)
1926 1937 if nodeiter:
1927 1938 yield changegroup.chunkheader(len(fname))
1928 1939 yield fname
1929 1940 lookup = lookuprevlink_func(filerevlog)
1930 1941 for chnk in filerevlog.group(nodeiter, lookup):
1931 1942 yield chnk
1932 1943
1933 1944 yield changegroup.closechunk()
1934 1945
1935 1946 if nodes:
1936 1947 self.hook('outgoing', node=hex(nodes[0]), source=source)
1937 1948
1938 1949 return util.chunkbuffer(gengroup())
1939 1950
1940 1951 def addchangegroup(self, source, srctype, url, emptyok=False):
1941 1952 """add changegroup to repo.
1942 1953
1943 1954 return values:
1944 1955 - nothing changed or no source: 0
1945 1956 - more heads than before: 1+added heads (2..n)
1946 1957 - fewer heads than before: -1-removed heads (-2..-n)
1947 1958 - number of heads stays the same: 1
1948 1959 """
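A small helper (hypothetical, merely restating the docstring) shows how a caller might decode that return value:

def describe_result(ret):
    # Mirrors the return-value convention documented above.
    if ret == 0:
        return "nothing changed"
    if ret == 1:
        return "head count unchanged"
    if ret > 1:
        return "%d head(s) added" % (ret - 1)
    return "%d head(s) removed" % (-ret - 1)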
1949 1960 def csmap(x):
1950 1961 self.ui.debug(_("add changeset %s\n") % short(x))
1951 1962 return len(cl)
1952 1963
1953 1964 def revmap(x):
1954 1965 return cl.rev(x)
1955 1966
1956 1967 if not source:
1957 1968 return 0
1958 1969
1959 1970 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1960 1971
1961 1972 changesets = files = revisions = 0
1962 1973
1963 1974 # write changelog data to temp files so concurrent readers will not see
1964 1975 # inconsistent view
1965 1976 cl = self.changelog
1966 1977 cl.delayupdate()
1967 1978 oldheads = len(cl.heads())
1968 1979
1969 1980 tr = self.transaction()
1970 1981 try:
1971 1982 trp = weakref.proxy(tr)
1972 1983 # pull off the changeset group
1973 1984 self.ui.status(_("adding changesets\n"))
1974 1985 clstart = len(cl)
1975 1986 chunkiter = changegroup.chunkiter(source)
1976 1987 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1977 1988 raise util.Abort(_("received changelog group is empty"))
1978 1989 clend = len(cl)
1979 1990 changesets = clend - clstart
1980 1991
1981 1992 # pull off the manifest group
1982 1993 self.ui.status(_("adding manifests\n"))
1983 1994 chunkiter = changegroup.chunkiter(source)
1984 1995 # no need to check for empty manifest group here:
1985 1996 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1986 1997 # no new manifest will be created and the manifest group will
1987 1998 # be empty during the pull
1988 1999 self.manifest.addgroup(chunkiter, revmap, trp)
1989 2000
1990 2001 # process the files
1991 2002 self.ui.status(_("adding file changes\n"))
1992 2003 while 1:
1993 2004 f = changegroup.getchunk(source)
1994 2005 if not f:
1995 2006 break
1996 2007 self.ui.debug(_("adding %s revisions\n") % f)
1997 2008 fl = self.file(f)
1998 2009 o = len(fl)
1999 2010 chunkiter = changegroup.chunkiter(source)
2000 2011 if fl.addgroup(chunkiter, revmap, trp) is None:
2001 2012 raise util.Abort(_("received file revlog group is empty"))
2002 2013 revisions += len(fl) - o
2003 2014 files += 1
2004 2015
2005 2016 newheads = len(cl.heads())
2006 2017 heads = ""
2007 2018 if oldheads and newheads != oldheads:
2008 2019 heads = _(" (%+d heads)") % (newheads - oldheads)
2009 2020
2010 2021 self.ui.status(_("added %d changesets"
2011 2022 " with %d changes to %d files%s\n")
2012 2023 % (changesets, revisions, files, heads))
2013 2024
2014 2025 if changesets > 0:
2015 2026 p = lambda: cl.writepending() and self.root or ""
2016 2027 self.hook('pretxnchangegroup', throw=True,
2017 2028 node=hex(cl.node(clstart)), source=srctype,
2018 2029 url=url, pending=p)
2019 2030
2020 2031 # make changelog see real files again
2021 2032 cl.finalize(trp)
2022 2033
2023 2034 tr.close()
2024 2035 finally:
2025 2036 del tr
2026 2037
2027 2038 if changesets > 0:
2028 2039 # forcefully update the on-disk branch cache
2029 2040 self.ui.debug(_("updating the branch cache\n"))
2030 2041 self.branchtags()
2031 2042 self.hook("changegroup", node=hex(cl.node(clstart)),
2032 2043 source=srctype, url=url)
2033 2044
2034 2045 for i in xrange(clstart, clend):
2035 2046 self.hook("incoming", node=hex(cl.node(i)),
2036 2047 source=srctype, url=url)
2037 2048
2038 2049 # never return 0 here:
2039 2050 if newheads < oldheads:
2040 2051 return newheads - oldheads - 1
2041 2052 else:
2042 2053 return newheads - oldheads + 1
2043 2054
2044 2055
2045 2056 def stream_in(self, remote):
2046 2057 fp = remote.stream_out()
2047 2058 l = fp.readline()
2048 2059 try:
2049 2060 resp = int(l)
2050 2061 except ValueError:
2051 2062 raise error.ResponseError(
2052 2063 _('Unexpected response from remote server:'), l)
2053 2064 if resp == 1:
2054 2065 raise util.Abort(_('operation forbidden by server'))
2055 2066 elif resp == 2:
2056 2067 raise util.Abort(_('locking the remote repository failed'))
2057 2068 elif resp != 0:
2058 2069 raise util.Abort(_('the server sent an unknown error code'))
2059 2070 self.ui.status(_('streaming all changes\n'))
2060 2071 l = fp.readline()
2061 2072 try:
2062 2073 total_files, total_bytes = map(int, l.split(' ', 1))
2063 2074 except (ValueError, TypeError):
2064 2075 raise error.ResponseError(
2065 2076 _('Unexpected response from remote server:'), l)
2066 2077 self.ui.status(_('%d files to transfer, %s of data\n') %
2067 2078 (total_files, util.bytecount(total_bytes)))
2068 2079 start = time.time()
2069 2080 for i in xrange(total_files):
2070 2081 # XXX doesn't support '\n' or '\r' in filenames
2071 2082 l = fp.readline()
2072 2083 try:
2073 2084 name, size = l.split('\0', 1)
2074 2085 size = int(size)
2075 2086 except (ValueError, TypeError):
2076 2087 raise error.ResponseError(
2077 2088 _('Unexpected response from remote server:'), l)
2078 2089 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2079 2090 # for backwards compat, name was partially encoded
2080 2091 ofp = self.sopener(store.decodedir(name), 'w')
2081 2092 for chunk in util.filechunkiter(fp, limit=size):
2082 2093 ofp.write(chunk)
2083 2094 ofp.close()
2084 2095 elapsed = time.time() - start
2085 2096 if elapsed <= 0:
2086 2097 elapsed = 0.001
2087 2098 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2088 2099 (util.bytecount(total_bytes), elapsed,
2089 2100 util.bytecount(total_bytes / elapsed)))
2090 2101 self.invalidate()
2091 2102 return len(self.heads()) + 1
2092 2103
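For reference, the wire format consumed by stream_in() above can be restated as a toy parser (a sketch only; the real code also handles error responses, backwards-compatible name decoding, and chunked writes):

def parse_stream_toy(fp):
    # Format: a status line, a "<files> <bytes>" line, then for each
    # file a "<name>\0<size>" line followed by <size> raw bytes.
    if int(fp.readline()) != 0:
        raise ValueError("server refused to stream")
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))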
2093 2104 def clone(self, remote, heads=[], stream=False):
2094 2105 '''clone remote repository.
2095 2106
2096 2107 keyword arguments:
2097 2108 heads: list of revs to clone (forces use of pull)
2098 2109 stream: use streaming clone if possible'''
2099 2110
2100 2111 # now, all clients that can request uncompressed clones can
2101 2112 # read repo formats supported by all servers that can serve
2102 2113 # them.
2103 2114
2104 2115 # if revlog format changes, client will have to check version
2105 2116 # and format flags on "stream" capability, and use
2106 2117 # uncompressed only if compatible.
2107 2118
2108 2119 if stream and not heads and remote.capable('stream'):
2109 2120 return self.stream_in(remote)
2110 2121 return self.pull(remote, heads)
2111 2122
2112 2123 # used to avoid circular references so destructors work
2113 2124 def aftertrans(files):
2114 2125 renamefiles = [tuple(t) for t in files]
2115 2126 def a():
2116 2127 for src, dest in renamefiles:
2117 2128 util.rename(src, dest)
2118 2129 return a
2119 2130
2120 2131 def instance(ui, path, create):
2121 2132 return localrepository(ui, util.drop_scheme('file', path), create)
2122 2133
2123 2134 def islocal(path):
2124 2135 return True
@@ -1,144 +1,145 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2, incorporated herein by reference.
8 8
9 9 import changegroup
10 10 from node import nullrev, short
11 11 from i18n import _
12 12 import os
13 13
14 14 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
15 15 """create a bundle with the specified revisions as a backup"""
16 16 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
17 17 backupdir = repo.join("strip-backup")
18 18 if not os.path.isdir(backupdir):
19 19 os.mkdir(backupdir)
20 20 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
21 21 repo.ui.warn(_("saving bundle to %s\n") % name)
22 22 return changegroup.writebundle(cg, name, "HG10BZ")
23 23
24 24 def _collectfiles(repo, striprev):
25 25 """find out the filelogs affected by the strip"""
26 26 files = set()
27 27
28 28 for x in xrange(striprev, len(repo)):
29 29 files.update(repo[x].files())
30 30
31 31 return sorted(files)
32 32
33 33 def _collectextranodes(repo, files, link):
34 34 """return the nodes that have to be saved before the strip"""
35 35 def collectone(revlog):
36 36 extra = []
37 37 startrev = count = len(revlog)
38 38 # find the truncation point of the revlog
39 39 for i in xrange(count):
40 40 lrev = revlog.linkrev(i)
41 41 if lrev >= link:
42 42 startrev = i + 1
43 43 break
44 44
45 45 # see if any revision after that point has a linkrev less than link
46 46 # (we have to manually save these guys)
47 47 for i in xrange(startrev, count):
48 48 node = revlog.node(i)
49 49 lrev = revlog.linkrev(i)
50 50 if lrev < link:
51 51 extra.append((node, cl.node(lrev)))
52 52
53 53 return extra
54 54
55 55 extranodes = {}
56 56 cl = repo.changelog
57 57 extra = collectone(repo.manifest)
58 58 if extra:
59 59 extranodes[1] = extra
60 60 for fname in files:
61 61 f = repo.file(fname)
62 62 extra = collectone(f)
63 63 if extra:
64 64 extranodes[fname] = extra
65 65
66 66 return extranodes
67 67
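The linkrev scan in collectone() can be replayed on plain data. A toy version over a bare list of linkrevs (illustrative only):

def collectone_toy(linkrevs, link):
    # Truncation point: one past the first revision whose linkrev
    # reaches `link`. Any later revision that still links below
    # `link` would be lost by the truncation and must be saved.
    startrev = len(linkrevs)
    for i in xrange(len(linkrevs)):
        if linkrevs[i] >= link:
            startrev = i + 1
            break
    return [i for i in xrange(startrev, len(linkrevs))
            if linkrevs[i] < link]

# e.g. collectone_toy([0, 2, 1, 3], 2) == [2]: revision 2 sits past
# the truncation point but its linkrev (1) is below the strip point.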
68 68 def strip(ui, repo, node, backup="all"):
69 69 cl = repo.changelog
70 70 # TODO delete the undo files, and handle undo of merge sets
71 71 striprev = cl.rev(node)
72 72
73 73 # Some revisions with rev > striprev may not be descendants of striprev.
74 74 # We have to find these revisions and put them in a bundle, so that
75 75 # we can restore them after the truncations.
76 76 # To create the bundle we use repo.changegroupsubset which requires
77 77 # the list of heads and bases of the set of interesting revisions.
78 78 # (head = revision in the set that has no descendant in the set;
79 79 # base = revision in the set that has no ancestor in the set)
80 80 tostrip = set((striprev,))
81 81 saveheads = set()
82 82 savebases = []
83 83 for r in xrange(striprev + 1, len(cl)):
84 84 parents = cl.parentrevs(r)
85 85 if parents[0] in tostrip or parents[1] in tostrip:
86 86 # r is a descendant of striprev
87 87 tostrip.add(r)
88 88 # if this is a merge and one of the parents does not descend
89 89 # from striprev, mark that parent as a savehead.
90 90 if parents[1] != nullrev:
91 91 for p in parents:
92 92 if p not in tostrip and p > striprev:
93 93 saveheads.add(p)
94 94 else:
95 95 # if no parents of this revision will be stripped, mark it as
96 96 # a savebase
97 97 if parents[0] < striprev and parents[1] < striprev:
98 98 savebases.append(cl.node(r))
99 99
100 100 saveheads.difference_update(parents)
101 101 saveheads.add(r)
102 102
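The loop above can be replayed on toy data to see the three buckets it builds. A hypothetical re-implementation over a dict {rev: (p1, p2)}, with -1 standing for the null parent and plain ints in place of nodes:

def classify_toy(parentrevs, striprev):
    tostrip = set([striprev])
    saveheads, savebases = set(), []
    for r in sorted(rev for rev in parentrevs if rev > striprev):
        p1, p2 = parentrevs[r]
        if p1 in tostrip or p2 in tostrip:
            tostrip.add(r)              # r descends from striprev
            if p2 != -1:                # merge: a parent may survive
                for p in (p1, p2):
                    if p not in tostrip and p > striprev:
                        saveheads.add(p)
        else:
            if p1 < striprev and p2 < striprev:
                savebases.append(r)     # entirely below the strip
            saveheads.difference_update((p1, p2))
            saveheads.add(r)
    return tostrip, saveheads, savebases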
103 103 saveheads = [cl.node(r) for r in saveheads]
104 104 files = _collectfiles(repo, striprev)
105 105
106 106 extranodes = _collectextranodes(repo, files, striprev)
107 107
108 108 # create a changegroup for all the branches we need to keep
109 109 if backup == "all":
110 110 _bundle(repo, [node], cl.heads(), node, 'backup')
111 111 if saveheads or extranodes:
112 112 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
113 113 extranodes)
114 114
115 115 mfst = repo.manifest
116 116
117 117 tr = repo.transaction()
118 118 offset = len(tr.entries)
119 119
120 120 tr.startgroup()
121 121 cl.strip(striprev, tr)
122 122 mfst.strip(striprev, tr)
123 123 for fn in files:
124 124 repo.file(fn).strip(striprev, tr)
125 125 tr.endgroup()
126 126
127 127 try:
128 128 for i in xrange(offset, len(tr.entries)):
129 129 file, troffset, ignore = tr.entries[i]
130 130 repo.sopener(file, 'a').truncate(troffset)
131 131 tr.close()
132 132 except:
133 133 tr.abort()
134 134 raise
135 135
136 136 if saveheads or extranodes:
137 137 ui.status(_("adding branch\n"))
138 138 f = open(chgrpfile, "rb")
139 139 gen = changegroup.readbundle(f, chgrpfile)
140 140 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
141 141 f.close()
142 142 if backup != "strip":
143 143 os.unlink(chgrpfile)
144 144
145 repo.destroyed()