localrepo: introduce method for explicit branch cache update...
Georg Brandl
r12066:d01e2865 default
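This commit splits the cache-refreshing half of branchmap() out into a new updatebranchcache() method, so callers that only need the branch head cache brought up to date (such as commitctx() below, which previously called branchtags() purely for its side effect) can ask for that explicitly. A minimal sketch of the resulting API, assuming an existing localrepository instance named repo:

    repo.updatebranchcache()    # refresh the branch head cache only
    heads = repo.branchmap()    # refresh if stale, then return {branch: [branchheads]}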
@@ -1,1803 +1,1805 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25 25
26 26 def __init__(self, baseui, path=None, create=0):
27 27 repo.repository.__init__(self)
28 28 self.root = os.path.realpath(util.expandpath(path))
29 29 self.path = os.path.join(self.root, ".hg")
30 30 self.origroot = path
31 31 self.opener = util.opener(self.path)
32 32 self.wopener = util.opener(self.root)
33 33 self.baseui = baseui
34 34 self.ui = baseui.copy()
35 35
36 36 try:
37 37 self.ui.readconfig(self.join("hgrc"), self.root)
38 38 extensions.loadall(self.ui)
39 39 except IOError:
40 40 pass
41 41
42 42 if not os.path.isdir(self.path):
43 43 if create:
44 44 if not os.path.exists(path):
45 45 util.makedirs(path)
46 46 os.mkdir(self.path)
47 47 requirements = ["revlogv1"]
48 48 if self.ui.configbool('format', 'usestore', True):
49 49 os.mkdir(os.path.join(self.path, "store"))
50 50 requirements.append("store")
51 51 if self.ui.configbool('format', 'usefncache', True):
52 52 requirements.append("fncache")
53 53 # create an invalid changelog
54 54 self.opener("00changelog.i", "a").write(
55 55 '\0\0\0\2' # represents revlogv2
56 56 ' dummy changelog to prevent using the old repo layout'
57 57 )
58 58 if self.ui.configbool('format', 'parentdelta', False):
59 59 requirements.append("parentdelta")
60 60 reqfile = self.opener("requires", "w")
61 61 for r in requirements:
62 62 reqfile.write("%s\n" % r)
63 63 reqfile.close()
64 64 else:
65 65 raise error.RepoError(_("repository %s not found") % path)
66 66 elif create:
67 67 raise error.RepoError(_("repository %s already exists") % path)
68 68 else:
69 69 # find requirements
70 70 requirements = set()
71 71 try:
72 72 requirements = set(self.opener("requires").read().splitlines())
73 73 except IOError, inst:
74 74 if inst.errno != errno.ENOENT:
75 75 raise
76 76 for r in requirements - self.supported:
77 77 raise error.RepoError(_("requirement '%s' not supported") % r)
78 78
79 79 self.sharedpath = self.path
80 80 try:
81 81 s = os.path.realpath(self.opener("sharedpath").read())
82 82 if not os.path.exists(s):
83 83 raise error.RepoError(
84 84 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 85 self.sharedpath = s
86 86 except IOError, inst:
87 87 if inst.errno != errno.ENOENT:
88 88 raise
89 89
90 90 self.store = store.store(requirements, self.sharedpath, util.opener)
91 91 self.spath = self.store.path
92 92 self.sopener = self.store.opener
93 93 self.sjoin = self.store.join
94 94 self.opener.createmode = self.store.createmode
95 95 self.sopener.options = {}
96 96 if 'parentdelta' in requirements:
97 97 self.sopener.options['parentdelta'] = 1
98 98
99 99 # These two define the set of tags for this repository. _tags
100 100 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 101 # 'local'. (Global tags are defined by .hgtags across all
102 102 # heads, and local tags are defined in .hg/localtags.) They
103 103 # constitute the in-memory cache of tags.
104 104 self._tags = None
105 105 self._tagtypes = None
106 106
107 107 self._branchcache = None # in UTF-8
108 108 self._branchcachetip = None
109 109 self.nodetagscache = None
110 110 self.filterpats = {}
111 111 self._datafilters = {}
112 112 self._transref = self._lockref = self._wlockref = None
113 113
114 114 @propertycache
115 115 def changelog(self):
116 116 c = changelog.changelog(self.sopener)
117 117 if 'HG_PENDING' in os.environ:
118 118 p = os.environ['HG_PENDING']
119 119 if p.startswith(self.root):
120 120 c.readpending('00changelog.i.a')
121 121 self.sopener.options['defversion'] = c.version
122 122 return c
123 123
124 124 @propertycache
125 125 def manifest(self):
126 126 return manifest.manifest(self.sopener)
127 127
128 128 @propertycache
129 129 def dirstate(self):
130 130 return dirstate.dirstate(self.opener, self.ui, self.root)
131 131
132 132 def __getitem__(self, changeid):
133 133 if changeid is None:
134 134 return context.workingctx(self)
135 135 return context.changectx(self, changeid)
136 136
137 137 def __contains__(self, changeid):
138 138 try:
139 139 return bool(self.lookup(changeid))
140 140 except error.RepoLookupError:
141 141 return False
142 142
143 143 def __nonzero__(self):
144 144 return True
145 145
146 146 def __len__(self):
147 147 return len(self.changelog)
148 148
149 149 def __iter__(self):
150 150 for i in xrange(len(self)):
151 151 yield i
152 152
153 153 def url(self):
154 154 return 'file:' + self.root
155 155
156 156 def hook(self, name, throw=False, **args):
157 157 return hook.hook(self.ui, self, name, throw, **args)
158 158
159 159 tag_disallowed = ':\r\n'
160 160
161 161 def _tag(self, names, node, message, local, user, date, extra={}):
162 162 if isinstance(names, str):
163 163 allchars = names
164 164 names = (names,)
165 165 else:
166 166 allchars = ''.join(names)
167 167 for c in self.tag_disallowed:
168 168 if c in allchars:
169 169 raise util.Abort(_('%r cannot be used in a tag name') % c)
170 170
171 171 branches = self.branchmap()
172 172 for name in names:
173 173 self.hook('pretag', throw=True, node=hex(node), tag=name,
174 174 local=local)
175 175 if name in branches:
176 176 self.ui.warn(_("warning: tag %s conflicts with existing"
177 177 " branch name\n") % name)
178 178
179 179 def writetags(fp, names, munge, prevtags):
180 180 fp.seek(0, 2)
181 181 if prevtags and prevtags[-1] != '\n':
182 182 fp.write('\n')
183 183 for name in names:
184 184 m = munge and munge(name) or name
185 185 if self._tagtypes and name in self._tagtypes:
186 186 old = self._tags.get(name, nullid)
187 187 fp.write('%s %s\n' % (hex(old), m))
188 188 fp.write('%s %s\n' % (hex(node), m))
189 189 fp.close()
190 190
191 191 prevtags = ''
192 192 if local:
193 193 try:
194 194 fp = self.opener('localtags', 'r+')
195 195 except IOError:
196 196 fp = self.opener('localtags', 'a')
197 197 else:
198 198 prevtags = fp.read()
199 199
200 200 # local tags are stored in the current charset
201 201 writetags(fp, names, None, prevtags)
202 202 for name in names:
203 203 self.hook('tag', node=hex(node), tag=name, local=local)
204 204 return
205 205
206 206 try:
207 207 fp = self.wfile('.hgtags', 'rb+')
208 208 except IOError:
209 209 fp = self.wfile('.hgtags', 'ab')
210 210 else:
211 211 prevtags = fp.read()
212 212
213 213 # committed tags are stored in UTF-8
214 214 writetags(fp, names, encoding.fromlocal, prevtags)
215 215
216 216 if '.hgtags' not in self.dirstate:
217 217 self[None].add(['.hgtags'])
218 218
219 219 m = matchmod.exact(self.root, '', ['.hgtags'])
220 220 tagnode = self.commit(message, user, date, extra=extra, match=m)
221 221
222 222 for name in names:
223 223 self.hook('tag', node=hex(node), tag=name, local=local)
224 224
225 225 return tagnode
226 226
227 227 def tag(self, names, node, message, local, user, date):
228 228 '''tag a revision with one or more symbolic names.
229 229
230 230 names is a list of strings or, when adding a single tag, names may be a
231 231 string.
232 232
233 233 if local is True, the tags are stored in a per-repository file.
234 234 otherwise, they are stored in the .hgtags file, and a new
235 235 changeset is committed with the change.
236 236
237 237 keyword arguments:
238 238
239 239 local: whether to store tags in non-version-controlled file
240 240 (default False)
241 241
242 242 message: commit message to use if committing
243 243
244 244 user: name of user to use if committing
245 245
246 246 date: date tuple to use if committing'''
247 247
248 248 for x in self.status()[:5]:
249 249 if '.hgtags' in x:
250 250 raise util.Abort(_('working copy of .hgtags is changed '
251 251 '(please commit .hgtags manually)'))
252 252
253 253 self.tags() # instantiate the cache
254 254 self._tag(names, node, message, local, user, date)
255 255
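# Illustrative sketch of the tag() API documented above; 'repo', the user
# name and the tag value are placeholders. With local=False this commits
# an updated .hgtags file, as the docstring describes.
node = repo.lookup('tip')
repo.tag(['v1.0'], node, 'Added tag v1.0', False, 'alice', None)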
256 256 def tags(self):
257 257 '''return a mapping of tag to node'''
258 258 if self._tags is None:
259 259 (self._tags, self._tagtypes) = self._findtags()
260 260
261 261 return self._tags
262 262
263 263 def _findtags(self):
264 264 '''Do the hard work of finding tags. Return a pair of dicts
265 265 (tags, tagtypes) where tags maps tag name to node, and tagtypes
266 266 maps tag name to a string like \'global\' or \'local\'.
267 267 Subclasses or extensions are free to add their own tags, but
268 268 should be aware that the returned dicts will be retained for the
269 269 duration of the localrepo object.'''
270 270
271 271 # XXX what tagtype should subclasses/extensions use? Currently
272 272 # mq and bookmarks add tags, but do not set the tagtype at all.
273 273 # Should each extension invent its own tag type? Should there
274 274 # be one tagtype for all such "virtual" tags? Or is the status
275 275 # quo fine?
276 276
277 277 alltags = {} # map tag name to (node, hist)
278 278 tagtypes = {}
279 279
280 280 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
281 281 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
282 282
283 283 # Build the return dicts. Have to re-encode tag names because
284 284 # the tags module always uses UTF-8 (in order not to lose info
285 285 # writing to the cache), but the rest of Mercurial wants them in
286 286 # local encoding.
287 287 tags = {}
288 288 for (name, (node, hist)) in alltags.iteritems():
289 289 if node != nullid:
290 290 tags[encoding.tolocal(name)] = node
291 291 tags['tip'] = self.changelog.tip()
292 292 tagtypes = dict([(encoding.tolocal(name), value)
293 293 for (name, value) in tagtypes.iteritems()])
294 294 return (tags, tagtypes)
295 295
296 296 def tagtype(self, tagname):
297 297 '''
298 298 return the type of the given tag. result can be:
299 299
300 300 'local' : a local tag
301 301 'global' : a global tag
302 302 None : tag does not exist
303 303 '''
304 304
305 305 self.tags()
306 306
307 307 return self._tagtypes.get(tagname)
308 308
309 309 def tagslist(self):
310 310 '''return a list of tags ordered by revision'''
311 311 l = []
312 312 for t, n in self.tags().iteritems():
313 313 try:
314 314 r = self.changelog.rev(n)
315 315 except:
316 316 r = -2 # sort to the beginning of the list if unknown
317 317 l.append((r, t, n))
318 318 return [(t, n) for r, t, n in sorted(l)]
319 319
320 320 def nodetags(self, node):
321 321 '''return the tags associated with a node'''
322 322 if not self.nodetagscache:
323 323 self.nodetagscache = {}
324 324 for t, n in self.tags().iteritems():
325 325 self.nodetagscache.setdefault(n, []).append(t)
326 326 for tags in self.nodetagscache.itervalues():
327 327 tags.sort()
328 328 return self.nodetagscache.get(node, [])
329 329
330 330 def _branchtags(self, partial, lrev):
331 331 # TODO: rename this function?
332 332 tiprev = len(self) - 1
333 333 if lrev != tiprev:
334 334 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
335 335 self._updatebranchcache(partial, ctxgen)
336 336 self._writebranchcache(partial, self.changelog.tip(), tiprev)
337 337
338 338 return partial
339 339
340 def branchmap(self):
341 '''returns a dictionary {branch: [branchheads]}'''
340 def updatebranchcache(self):
342 341 tip = self.changelog.tip()
343 342 if self._branchcache is not None and self._branchcachetip == tip:
344 343 return self._branchcache
345 344
346 345 oldtip = self._branchcachetip
347 346 self._branchcachetip = tip
348 347 if oldtip is None or oldtip not in self.changelog.nodemap:
349 348 partial, last, lrev = self._readbranchcache()
350 349 else:
351 350 lrev = self.changelog.rev(oldtip)
352 351 partial = self._branchcache
353 352
354 353 self._branchtags(partial, lrev)
355 354 # this private cache holds all heads (not just tips)
356 355 self._branchcache = partial
357 356
357 def branchmap(self):
358 '''returns a dictionary {branch: [branchheads]}'''
359 self.updatebranchcache()
358 360 return self._branchcache
359 361
360 362 def branchtags(self):
361 363 '''return a dict where branch names map to the tipmost head of
362 364 the branch, open heads come before closed'''
363 365 bt = {}
364 366 for bn, heads in self.branchmap().iteritems():
365 367 tip = heads[-1]
366 368 for h in reversed(heads):
367 369 if 'close' not in self.changelog.read(h)[5]:
368 370 tip = h
369 371 break
370 372 bt[bn] = tip
371 373 return bt
372 374
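# Two views over the same branch cache (sketch; 'repo' assumed):
bmap = repo.branchmap()   # {branch: [branchheads]}, heads ordered oldest to newest
tips = repo.branchtags()  # {branch: tipmost head}, open heads preferred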
373 375
374 376 def _readbranchcache(self):
375 377 partial = {}
376 378 try:
377 379 f = self.opener("branchheads.cache")
378 380 lines = f.read().split('\n')
379 381 f.close()
380 382 except (IOError, OSError):
381 383 return {}, nullid, nullrev
382 384
383 385 try:
384 386 last, lrev = lines.pop(0).split(" ", 1)
385 387 last, lrev = bin(last), int(lrev)
386 388 if lrev >= len(self) or self[lrev].node() != last:
387 389 # invalidate the cache
388 390 raise ValueError('invalidating branch cache (tip differs)')
389 391 for l in lines:
390 392 if not l:
391 393 continue
392 394 node, label = l.split(" ", 1)
393 395 partial.setdefault(label.strip(), []).append(bin(node))
394 396 except KeyboardInterrupt:
395 397 raise
396 398 except Exception, inst:
397 399 if self.ui.debugflag:
398 400 self.ui.warn(str(inst), '\n')
399 401 partial, last, lrev = {}, nullid, nullrev
400 402 return partial, last, lrev
401 403
402 404 def _writebranchcache(self, branches, tip, tiprev):
403 405 try:
404 406 f = self.opener("branchheads.cache", "w", atomictemp=True)
405 407 f.write("%s %s\n" % (hex(tip), tiprev))
406 408 for label, nodes in branches.iteritems():
407 409 for node in nodes:
408 410 f.write("%s %s\n" % (hex(node), label))
409 411 f.rename()
410 412 except (IOError, OSError):
411 413 pass
412 414
413 415 def _updatebranchcache(self, partial, ctxgen):
414 416 # collect new branch entries
415 417 newbranches = {}
416 418 for c in ctxgen:
417 419 newbranches.setdefault(c.branch(), []).append(c.node())
418 420 # if older branchheads are reachable from new ones, they aren't
419 421 # really branchheads. Note checking parents is insufficient:
420 422 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
421 423 for branch, newnodes in newbranches.iteritems():
422 424 bheads = partial.setdefault(branch, [])
423 425 bheads.extend(newnodes)
424 426 if len(bheads) <= 1:
425 427 continue
426 428 # starting from tip means fewer passes over reachable
427 429 while newnodes:
428 430 latest = newnodes.pop()
429 431 if latest not in bheads:
430 432 continue
431 433 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
432 434 reachable = self.changelog.reachable(latest, minbhrev)
433 435 reachable.remove(latest)
434 436 bheads = [b for b in bheads if b not in reachable]
435 437 partial[branch] = bheads
436 438
437 439 def lookup(self, key):
438 440 if isinstance(key, int):
439 441 return self.changelog.node(key)
440 442 elif key == '.':
441 443 return self.dirstate.parents()[0]
442 444 elif key == 'null':
443 445 return nullid
444 446 elif key == 'tip':
445 447 return self.changelog.tip()
446 448 n = self.changelog._match(key)
447 449 if n:
448 450 return n
449 451 if key in self.tags():
450 452 return self.tags()[key]
451 453 if key in self.branchtags():
452 454 return self.branchtags()[key]
453 455 n = self.changelog._partialmatch(key)
454 456 if n:
455 457 return n
456 458
457 459 # can't find key, check if it might have come from damaged dirstate
458 460 if key in self.dirstate.parents():
459 461 raise error.Abort(_("working directory has unknown parent '%s'!")
460 462 % short(key))
461 463 try:
462 464 if len(key) == 20:
463 465 key = hex(key)
464 466 except:
465 467 pass
466 468 raise error.RepoLookupError(_("unknown revision '%s'") % key)
467 469
468 470 def lookupbranch(self, key, remote=None):
469 471 repo = remote or self
470 472 if key in repo.branchmap():
471 473 return key
472 474
473 475 repo = (remote and remote.local()) and remote or self
474 476 return repo[key].branch()
475 477
476 478 def local(self):
477 479 return True
478 480
479 481 def join(self, f):
480 482 return os.path.join(self.path, f)
481 483
482 484 def wjoin(self, f):
483 485 return os.path.join(self.root, f)
484 486
485 487 def file(self, f):
486 488 if f[0] == '/':
487 489 f = f[1:]
488 490 return filelog.filelog(self.sopener, f)
489 491
490 492 def changectx(self, changeid):
491 493 return self[changeid]
492 494
493 495 def parents(self, changeid=None):
494 496 '''get list of changectxs for parents of changeid'''
495 497 return self[changeid].parents()
496 498
497 499 def filectx(self, path, changeid=None, fileid=None):
498 500 """changeid can be a changeset revision, node, or tag.
499 501 fileid can be a file revision or node."""
500 502 return context.filectx(self, path, changeid, fileid)
501 503
502 504 def getcwd(self):
503 505 return self.dirstate.getcwd()
504 506
505 507 def pathto(self, f, cwd=None):
506 508 return self.dirstate.pathto(f, cwd)
507 509
508 510 def wfile(self, f, mode='r'):
509 511 return self.wopener(f, mode)
510 512
511 513 def _link(self, f):
512 514 return os.path.islink(self.wjoin(f))
513 515
514 516 def _loadfilter(self, filter):
515 517 if filter not in self.filterpats:
516 518 l = []
517 519 for pat, cmd in self.ui.configitems(filter):
518 520 if cmd == '!':
519 521 continue
520 522 mf = matchmod.match(self.root, '', [pat])
521 523 fn = None
522 524 params = cmd
523 525 for name, filterfn in self._datafilters.iteritems():
524 526 if cmd.startswith(name):
525 527 fn = filterfn
526 528 params = cmd[len(name):].lstrip()
527 529 break
528 530 if not fn:
529 531 fn = lambda s, c, **kwargs: util.filter(s, c)
530 532 # Wrap old filters not supporting keyword arguments
531 533 if not inspect.getargspec(fn)[2]:
532 534 oldfn = fn
533 535 fn = lambda s, c, **kwargs: oldfn(s, c)
534 536 l.append((mf, fn, params))
535 537 self.filterpats[filter] = l
536 538
537 539 def _filter(self, filter, filename, data):
538 540 self._loadfilter(filter)
539 541
540 542 for mf, fn, cmd in self.filterpats[filter]:
541 543 if mf(filename):
542 544 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
543 545 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
544 546 break
545 547
546 548 return data
547 549
548 550 def adddatafilter(self, name, filter):
549 551 self._datafilters[name] = filter
550 552
551 553 def wread(self, filename):
552 554 if self._link(filename):
553 555 data = os.readlink(self.wjoin(filename))
554 556 else:
555 557 data = self.wopener(filename, 'r').read()
556 558 return self._filter("encode", filename, data)
557 559
558 560 def wwrite(self, filename, data, flags):
559 561 data = self._filter("decode", filename, data)
560 562 try:
561 563 os.unlink(self.wjoin(filename))
562 564 except OSError:
563 565 pass
564 566 if 'l' in flags:
565 567 self.wopener.symlink(data, filename)
566 568 else:
567 569 self.wopener(filename, 'w').write(data)
568 570 if 'x' in flags:
569 571 util.set_flags(self.wjoin(filename), False, True)
570 572
571 573 def wwritedata(self, filename, data):
572 574 return self._filter("decode", filename, data)
573 575
574 576 def transaction(self, desc):
575 577 tr = self._transref and self._transref() or None
576 578 if tr and tr.running():
577 579 return tr.nest()
578 580
579 581 # abort here if the journal already exists
580 582 if os.path.exists(self.sjoin("journal")):
581 583 raise error.RepoError(
582 584 _("abandoned transaction found - run hg recover"))
583 585
584 586 # save dirstate for rollback
585 587 try:
586 588 ds = self.opener("dirstate").read()
587 589 except IOError:
588 590 ds = ""
589 591 self.opener("journal.dirstate", "w").write(ds)
590 592 self.opener("journal.branch", "w").write(self.dirstate.branch())
591 593 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
592 594
593 595 renames = [(self.sjoin("journal"), self.sjoin("undo")),
594 596 (self.join("journal.dirstate"), self.join("undo.dirstate")),
595 597 (self.join("journal.branch"), self.join("undo.branch")),
596 598 (self.join("journal.desc"), self.join("undo.desc"))]
597 599 tr = transaction.transaction(self.ui.warn, self.sopener,
598 600 self.sjoin("journal"),
599 601 aftertrans(renames),
600 602 self.store.createmode)
601 603 self._transref = weakref.ref(tr)
602 604 return tr
603 605
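# Typical caller pattern for transaction(), mirroring commitctx() below;
# the description string is an arbitrary label used for undo bookkeeping.
lock = repo.lock()
try:
    tr = repo.transaction('example')
    try:
        # ... write revlog data through the transaction ...
        tr.close()
    finally:
        tr.release()
finally:
    lock.release()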
604 606 def recover(self):
605 607 lock = self.lock()
606 608 try:
607 609 if os.path.exists(self.sjoin("journal")):
608 610 self.ui.status(_("rolling back interrupted transaction\n"))
609 611 transaction.rollback(self.sopener, self.sjoin("journal"),
610 612 self.ui.warn)
611 613 self.invalidate()
612 614 return True
613 615 else:
614 616 self.ui.warn(_("no interrupted transaction available\n"))
615 617 return False
616 618 finally:
617 619 lock.release()
618 620
619 621 def rollback(self, dryrun=False):
620 622 wlock = lock = None
621 623 try:
622 624 wlock = self.wlock()
623 625 lock = self.lock()
624 626 if os.path.exists(self.sjoin("undo")):
625 627 try:
626 628 args = self.opener("undo.desc", "r").read().splitlines()
627 629 if len(args) >= 3 and self.ui.verbose:
628 630 desc = _("rolling back to revision %s"
629 631 " (undo %s: %s)\n") % (
630 632 int(args[0]) - 1, args[1], args[2])
631 633 elif len(args) >= 2:
632 634 desc = _("rolling back to revision %s (undo %s)\n") % (
633 635 int(args[0]) - 1, args[1])
634 636 except IOError:
635 637 desc = _("rolling back unknown transaction\n")
636 638 self.ui.status(desc)
637 639 if dryrun:
638 640 return
639 641 transaction.rollback(self.sopener, self.sjoin("undo"),
640 642 self.ui.warn)
641 643 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
642 644 try:
643 645 branch = self.opener("undo.branch").read()
644 646 self.dirstate.setbranch(branch)
645 647 except IOError:
646 648 self.ui.warn(_("Named branch could not be reset, "
647 649 "current branch still is: %s\n")
648 650 % encoding.tolocal(self.dirstate.branch()))
649 651 self.invalidate()
650 652 self.dirstate.invalidate()
651 653 self.destroyed()
652 654 else:
653 655 self.ui.warn(_("no rollback information available\n"))
654 656 return 1
655 657 finally:
656 658 release(lock, wlock)
657 659
658 660 def invalidatecaches(self):
659 661 self._tags = None
660 662 self._tagtypes = None
661 663 self.nodetagscache = None
662 664 self._branchcache = None # in UTF-8
663 665 self._branchcachetip = None
664 666
665 667 def invalidate(self):
666 668 for a in "changelog manifest".split():
667 669 if a in self.__dict__:
668 670 delattr(self, a)
669 671 self.invalidatecaches()
670 672
671 673 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
672 674 try:
673 675 l = lock.lock(lockname, 0, releasefn, desc=desc)
674 676 except error.LockHeld, inst:
675 677 if not wait:
676 678 raise
677 679 self.ui.warn(_("waiting for lock on %s held by %r\n") %
678 680 (desc, inst.locker))
679 681 # default to 600 seconds timeout
680 682 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
681 683 releasefn, desc=desc)
682 684 if acquirefn:
683 685 acquirefn()
684 686 return l
685 687
686 688 def lock(self, wait=True):
687 689 '''Lock the repository store (.hg/store) and return a weak reference
688 690 to the lock. Use this before modifying the store (e.g. committing or
689 691 stripping). If you are opening a transaction, get a lock as well.'''
690 692 l = self._lockref and self._lockref()
691 693 if l is not None and l.held:
692 694 l.lock()
693 695 return l
694 696
695 697 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
696 698 _('repository %s') % self.origroot)
697 699 self._lockref = weakref.ref(l)
698 700 return l
699 701
700 702 def wlock(self, wait=True):
701 703 '''Lock the non-store parts of the repository (everything under
702 704 .hg except .hg/store) and return a weak reference to the lock.
703 705 Use this before modifying files in .hg.'''
704 706 l = self._wlockref and self._wlockref()
705 707 if l is not None and l.held:
706 708 l.lock()
707 709 return l
708 710
709 711 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
710 712 self.dirstate.invalidate, _('working directory of %s') %
711 713 self.origroot)
712 714 self._wlockref = weakref.ref(l)
713 715 return l
714 716
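# Lock ordering convention used throughout this file (cf. rollback() above):
# take wlock before lock, and release both in reverse order.
wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    # ... modify the dirstate and the store ...
finally:
    release(lock, wlock)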
715 717 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
716 718 """
717 719 commit an individual file as part of a larger transaction
718 720 """
719 721
720 722 fname = fctx.path()
721 723 text = fctx.data()
722 724 flog = self.file(fname)
723 725 fparent1 = manifest1.get(fname, nullid)
724 726 fparent2 = fparent2o = manifest2.get(fname, nullid)
725 727
726 728 meta = {}
727 729 copy = fctx.renamed()
728 730 if copy and copy[0] != fname:
729 731 # Mark the new revision of this file as a copy of another
730 732 # file. This copy data will effectively act as a parent
731 733 # of this new revision. If this is a merge, the first
732 734 # parent will be the nullid (meaning "look up the copy data")
733 735 # and the second one will be the other parent. For example:
734 736 #
735 737 # 0 --- 1 --- 3 rev1 changes file foo
736 738 # \ / rev2 renames foo to bar and changes it
737 739 # \- 2 -/ rev3 should have bar with all changes and
738 740 # should record that bar descends from
739 741 # bar in rev2 and foo in rev1
740 742 #
741 743 # this allows this merge to succeed:
742 744 #
743 745 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
744 746 # \ / merging rev3 and rev4 should use bar@rev2
745 747 # \- 2 --- 4 as the merge base
746 748 #
747 749
748 750 cfname = copy[0]
749 751 crev = manifest1.get(cfname)
750 752 newfparent = fparent2
751 753
752 754 if manifest2: # branch merge
753 755 if fparent2 == nullid or crev is None: # copied on remote side
754 756 if cfname in manifest2:
755 757 crev = manifest2[cfname]
756 758 newfparent = fparent1
757 759
758 760 # find source in nearest ancestor if we've lost track
759 761 if not crev:
760 762 self.ui.debug(" %s: searching for copy revision for %s\n" %
761 763 (fname, cfname))
762 764 for ancestor in self['.'].ancestors():
763 765 if cfname in ancestor:
764 766 crev = ancestor[cfname].filenode()
765 767 break
766 768
767 769 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
768 770 meta["copy"] = cfname
769 771 meta["copyrev"] = hex(crev)
770 772 fparent1, fparent2 = nullid, newfparent
771 773 elif fparent2 != nullid:
772 774 # is one parent an ancestor of the other?
773 775 fparentancestor = flog.ancestor(fparent1, fparent2)
774 776 if fparentancestor == fparent1:
775 777 fparent1, fparent2 = fparent2, nullid
776 778 elif fparentancestor == fparent2:
777 779 fparent2 = nullid
778 780
779 781 # is the file changed?
780 782 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
781 783 changelist.append(fname)
782 784 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
783 785
784 786 # are just the flags changed during merge?
785 787 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
786 788 changelist.append(fname)
787 789
788 790 return fparent1
789 791
790 792 def commit(self, text="", user=None, date=None, match=None, force=False,
791 793 editor=False, extra={}):
792 794 """Add a new revision to current repository.
793 795
794 796 Revision information is gathered from the working directory,
795 797 match can be used to filter the committed files. If editor is
796 798 supplied, it is called to get a commit message.
797 799 """
798 800
799 801 def fail(f, msg):
800 802 raise util.Abort('%s: %s' % (f, msg))
801 803
802 804 if not match:
803 805 match = matchmod.always(self.root, '')
804 806
805 807 if not force:
806 808 vdirs = []
807 809 match.dir = vdirs.append
808 810 match.bad = fail
809 811
810 812 wlock = self.wlock()
811 813 try:
812 814 wctx = self[None]
813 815 merge = len(wctx.parents()) > 1
814 816
815 817 if (not force and merge and match and
816 818 (match.files() or match.anypats())):
817 819 raise util.Abort(_('cannot partially commit a merge '
818 820 '(do not specify files or patterns)'))
819 821
820 822 changes = self.status(match=match, clean=force)
821 823 if force:
822 824 changes[0].extend(changes[6]) # mq may commit unchanged files
823 825
824 826 # check subrepos
825 827 subs = []
826 828 removedsubs = set()
827 829 for p in wctx.parents():
828 830 removedsubs.update(s for s in p.substate if match(s))
829 831 for s in wctx.substate:
830 832 removedsubs.discard(s)
831 833 if match(s) and wctx.sub(s).dirty():
832 834 subs.append(s)
833 835 if (subs or removedsubs):
834 836 if (not match('.hgsub') and
835 837 '.hgsub' in (wctx.modified() + wctx.added())):
836 838 raise util.Abort(_("can't commit subrepos without .hgsub"))
837 839 if '.hgsubstate' not in changes[0]:
838 840 changes[0].insert(0, '.hgsubstate')
839 841
840 842 # make sure all explicit patterns are matched
841 843 if not force and match.files():
842 844 matched = set(changes[0] + changes[1] + changes[2])
843 845
844 846 for f in match.files():
845 847 if f == '.' or f in matched or f in wctx.substate:
846 848 continue
847 849 if f in changes[3]: # missing
848 850 fail(f, _('file not found!'))
849 851 if f in vdirs: # visited directory
850 852 d = f + '/'
851 853 for mf in matched:
852 854 if mf.startswith(d):
853 855 break
854 856 else:
855 857 fail(f, _("no match under directory!"))
856 858 elif f not in self.dirstate:
857 859 fail(f, _("file not tracked!"))
858 860
859 861 if (not force and not extra.get("close") and not merge
860 862 and not (changes[0] or changes[1] or changes[2])
861 863 and wctx.branch() == wctx.p1().branch()):
862 864 return None
863 865
864 866 ms = mergemod.mergestate(self)
865 867 for f in changes[0]:
866 868 if f in ms and ms[f] == 'u':
867 869 raise util.Abort(_("unresolved merge conflicts "
868 870 "(see hg resolve)"))
869 871
870 872 cctx = context.workingctx(self, text, user, date, extra, changes)
871 873 if editor:
872 874 cctx._text = editor(self, cctx, subs)
873 875 edited = (text != cctx._text)
874 876
875 877 # commit subs
876 878 if subs or removedsubs:
877 879 state = wctx.substate.copy()
878 880 for s in subs:
879 881 sub = wctx.sub(s)
880 882 self.ui.status(_('committing subrepository %s\n') %
881 883 subrepo.relpath(sub))
882 884 sr = sub.commit(cctx._text, user, date)
883 885 state[s] = (state[s][0], sr)
884 886 subrepo.writestate(self, state)
885 887
886 888 # Save commit message in case this transaction gets rolled back
887 889 # (e.g. by a pretxncommit hook). Leave the content alone on
888 890 # the assumption that the user will use the same editor again.
889 891 msgfile = self.opener('last-message.txt', 'wb')
890 892 msgfile.write(cctx._text)
891 893 msgfile.close()
892 894
893 895 p1, p2 = self.dirstate.parents()
894 896 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
895 897 try:
896 898 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
897 899 ret = self.commitctx(cctx, True)
898 900 except:
899 901 if edited:
900 902 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
901 903 self.ui.write(
902 904 _('note: commit message saved in %s\n') % msgfn)
903 905 raise
904 906
905 907 # update dirstate and mergestate
906 908 for f in changes[0] + changes[1]:
907 909 self.dirstate.normal(f)
908 910 for f in changes[2]:
909 911 self.dirstate.forget(f)
910 912 self.dirstate.setparents(ret)
911 913 ms.reset()
912 914 finally:
913 915 wlock.release()
914 916
915 917 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
916 918 return ret
917 919
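# Sketch: committing a single named file with an exact matcher, as _tag()
# does above; the message and user are placeholders.
m = matchmod.exact(repo.root, '', ['.hgtags'])
node = repo.commit('update tags', user='alice', match=m)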
918 920 def commitctx(self, ctx, error=False):
919 921 """Add a new revision to current repository.
920 922 Revision information is passed via the context argument.
921 923 """
922 924
923 925 tr = lock = None
924 926 removed = ctx.removed()
925 927 p1, p2 = ctx.p1(), ctx.p2()
926 928 m1 = p1.manifest().copy()
927 929 m2 = p2.manifest()
928 930 user = ctx.user()
929 931
930 932 lock = self.lock()
931 933 try:
932 934 tr = self.transaction("commit")
933 935 trp = weakref.proxy(tr)
934 936
935 937 # check in files
936 938 new = {}
937 939 changed = []
938 940 linkrev = len(self)
939 941 for f in sorted(ctx.modified() + ctx.added()):
940 942 self.ui.note(f + "\n")
941 943 try:
942 944 fctx = ctx[f]
943 945 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
944 946 changed)
945 947 m1.set(f, fctx.flags())
946 948 except OSError, inst:
947 949 self.ui.warn(_("trouble committing %s!\n") % f)
948 950 raise
949 951 except IOError, inst:
950 952 errcode = getattr(inst, 'errno', errno.ENOENT)
951 953 if error or errcode and errcode != errno.ENOENT:
952 954 self.ui.warn(_("trouble committing %s!\n") % f)
953 955 raise
954 956 else:
955 957 removed.append(f)
956 958
957 959 # update manifest
958 960 m1.update(new)
959 961 removed = [f for f in sorted(removed) if f in m1 or f in m2]
960 962 drop = [f for f in removed if f in m1]
961 963 for f in drop:
962 964 del m1[f]
963 965 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
964 966 p2.manifestnode(), (new, drop))
965 967
966 968 # update changelog
967 969 self.changelog.delayupdate()
968 970 n = self.changelog.add(mn, changed + removed, ctx.description(),
969 971 trp, p1.node(), p2.node(),
970 972 user, ctx.date(), ctx.extra().copy())
971 973 p = lambda: self.changelog.writepending() and self.root or ""
972 974 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
973 975 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
974 976 parent2=xp2, pending=p)
975 977 self.changelog.finalize(trp)
976 978 tr.close()
977 979
978 980 if self._branchcache:
979 self.branchtags()
981 self.updatebranchcache()
980 982 return n
981 983 finally:
982 984 if tr:
983 985 tr.release()
984 986 lock.release()
985 987
986 988 def destroyed(self):
987 989 '''Inform the repository that nodes have been destroyed.
988 990 Intended for use by strip and rollback, so there's a common
989 991 place for anything that has to be done after destroying history.'''
990 992 # XXX it might be nice if we could take the list of destroyed
991 993 # nodes, but I don't see an easy way for rollback() to do that
992 994
993 995 # Ensure the persistent tag cache is updated. Doing it now
994 996 # means that the tag cache only has to worry about destroyed
995 997 # heads immediately after a strip/rollback. That in turn
996 998 # guarantees that "cachetip == currenttip" (comparing both rev
997 999 # and node) always means no nodes have been added or destroyed.
998 1000
999 1001 # XXX this is suboptimal when qrefresh'ing: we strip the current
1000 1002 # head, refresh the tag cache, then immediately add a new head.
1001 1003 # But I think doing it this way is necessary for the "instant
1002 1004 # tag cache retrieval" case to work.
1003 1005 self.invalidatecaches()
1004 1006
1005 1007 def walk(self, match, node=None):
1006 1008 '''
1007 1009 walk recursively through the directory tree or a given
1008 1010 changeset, finding all files matched by the match
1009 1011 function
1010 1012 '''
1011 1013 return self[node].walk(match)
1012 1014
1013 1015 def status(self, node1='.', node2=None, match=None,
1014 1016 ignored=False, clean=False, unknown=False):
1015 1017 """return status of files between two nodes or node and working directory
1016 1018
1017 1019 If node1 is None, use the first dirstate parent instead.
1018 1020 If node2 is None, compare node1 with working directory.
1019 1021 """
1020 1022
1021 1023 def mfmatches(ctx):
1022 1024 mf = ctx.manifest().copy()
1023 1025 for fn in mf.keys():
1024 1026 if not match(fn):
1025 1027 del mf[fn]
1026 1028 return mf
1027 1029
1028 1030 if isinstance(node1, context.changectx):
1029 1031 ctx1 = node1
1030 1032 else:
1031 1033 ctx1 = self[node1]
1032 1034 if isinstance(node2, context.changectx):
1033 1035 ctx2 = node2
1034 1036 else:
1035 1037 ctx2 = self[node2]
1036 1038
1037 1039 working = ctx2.rev() is None
1038 1040 parentworking = working and ctx1 == self['.']
1039 1041 match = match or matchmod.always(self.root, self.getcwd())
1040 1042 listignored, listclean, listunknown = ignored, clean, unknown
1041 1043
1042 1044 # load earliest manifest first for caching reasons
1043 1045 if not working and ctx2.rev() < ctx1.rev():
1044 1046 ctx2.manifest()
1045 1047
1046 1048 if not parentworking:
1047 1049 def bad(f, msg):
1048 1050 if f not in ctx1:
1049 1051 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1050 1052 match.bad = bad
1051 1053
1052 1054 if working: # we need to scan the working dir
1053 1055 subrepos = []
1054 1056 if '.hgsub' in self.dirstate:
1055 1057 subrepos = ctx1.substate.keys()
1056 1058 s = self.dirstate.status(match, subrepos, listignored,
1057 1059 listclean, listunknown)
1058 1060 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1059 1061
1060 1062 # check for any possibly clean files
1061 1063 if parentworking and cmp:
1062 1064 fixup = []
1063 1065 # do a full compare of any files that might have changed
1064 1066 for f in sorted(cmp):
1065 1067 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1066 1068 or ctx1[f].cmp(ctx2[f])):
1067 1069 modified.append(f)
1068 1070 else:
1069 1071 fixup.append(f)
1070 1072
1071 1073 # update dirstate for files that are actually clean
1072 1074 if fixup:
1073 1075 if listclean:
1074 1076 clean += fixup
1075 1077
1076 1078 try:
1077 1079 # updating the dirstate is optional
1078 1080 # so we don't wait on the lock
1079 1081 wlock = self.wlock(False)
1080 1082 try:
1081 1083 for f in fixup:
1082 1084 self.dirstate.normal(f)
1083 1085 finally:
1084 1086 wlock.release()
1085 1087 except error.LockError:
1086 1088 pass
1087 1089
1088 1090 if not parentworking:
1089 1091 mf1 = mfmatches(ctx1)
1090 1092 if working:
1091 1093 # we are comparing working dir against non-parent
1092 1094 # generate a pseudo-manifest for the working dir
1093 1095 mf2 = mfmatches(self['.'])
1094 1096 for f in cmp + modified + added:
1095 1097 mf2[f] = None
1096 1098 mf2.set(f, ctx2.flags(f))
1097 1099 for f in removed:
1098 1100 if f in mf2:
1099 1101 del mf2[f]
1100 1102 else:
1101 1103 # we are comparing two revisions
1102 1104 deleted, unknown, ignored = [], [], []
1103 1105 mf2 = mfmatches(ctx2)
1104 1106
1105 1107 modified, added, clean = [], [], []
1106 1108 for fn in mf2:
1107 1109 if fn in mf1:
1108 1110 if (mf1.flags(fn) != mf2.flags(fn) or
1109 1111 (mf1[fn] != mf2[fn] and
1110 1112 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1111 1113 modified.append(fn)
1112 1114 elif listclean:
1113 1115 clean.append(fn)
1114 1116 del mf1[fn]
1115 1117 else:
1116 1118 added.append(fn)
1117 1119 removed = mf1.keys()
1118 1120
1119 1121 r = modified, added, removed, deleted, unknown, ignored, clean
1120 1122 [l.sort() for l in r]
1121 1123 return r
1122 1124
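# status() returns seven sorted lists; ignored, clean and unknown come back
# empty unless explicitly requested (sketch, 'repo' assumed):
modified, added, removed, deleted, unknown, ignored, clean = \
    repo.status(ignored=True, clean=True, unknown=True)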
1123 1125 def heads(self, start=None):
1124 1126 heads = self.changelog.heads(start)
1125 1127 # sort the output in rev descending order
1126 1128 heads = [(-self.changelog.rev(h), h) for h in heads]
1127 1129 return [n for (r, n) in sorted(heads)]
1128 1130
1129 1131 def branchheads(self, branch=None, start=None, closed=False):
1130 1132 '''return a (possibly filtered) list of heads for the given branch
1131 1133
1132 1134 Heads are returned in topological order, from newest to oldest.
1133 1135 If branch is None, use the dirstate branch.
1134 1136 If start is not None, return only heads reachable from start.
1135 1137 If closed is True, return heads that are marked as closed as well.
1136 1138 '''
1137 1139 if branch is None:
1138 1140 branch = self[None].branch()
1139 1141 branches = self.branchmap()
1140 1142 if branch not in branches:
1141 1143 return []
1142 1144 # the cache returns heads ordered lowest to highest
1143 1145 bheads = list(reversed(branches[branch]))
1144 1146 if start is not None:
1145 1147 # filter out the heads that cannot be reached from startrev
1146 1148 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1147 1149 bheads = [h for h in bheads if h in fbheads]
1148 1150 if not closed:
1149 1151 bheads = [h for h in bheads if
1150 1152 ('close' not in self.changelog.read(h)[5])]
1151 1153 return bheads
1152 1154
1153 1155 def branches(self, nodes):
1154 1156 if not nodes:
1155 1157 nodes = [self.changelog.tip()]
1156 1158 b = []
1157 1159 for n in nodes:
1158 1160 t = n
1159 1161 while 1:
1160 1162 p = self.changelog.parents(n)
1161 1163 if p[1] != nullid or p[0] == nullid:
1162 1164 b.append((t, n, p[0], p[1]))
1163 1165 break
1164 1166 n = p[0]
1165 1167 return b
1166 1168
1167 1169 def between(self, pairs):
1168 1170 r = []
1169 1171
1170 1172 for top, bottom in pairs:
1171 1173 n, l, i = top, [], 0
1172 1174 f = 1
1173 1175
1174 1176 while n != bottom and n != nullid:
1175 1177 p = self.changelog.parents(n)[0]
1176 1178 if i == f:
1177 1179 l.append(n)
1178 1180 f = f * 2
1179 1181 n = p
1180 1182 i += 1
1181 1183
1182 1184 r.append(l)
1183 1185
1184 1186 return r
1185 1187
1186 1188 def pull(self, remote, heads=None, force=False):
1187 1189 lock = self.lock()
1188 1190 try:
1189 1191 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1190 1192 force=force)
1191 1193 common, fetch, rheads = tmp
1192 1194 if not fetch:
1193 1195 self.ui.status(_("no changes found\n"))
1194 1196 return 0
1195 1197
1196 1198 if fetch == [nullid]:
1197 1199 self.ui.status(_("requesting all changes\n"))
1198 1200 elif heads is None and remote.capable('changegroupsubset'):
1199 1201 # issue1320, avoid a race if remote changed after discovery
1200 1202 heads = rheads
1201 1203
1202 1204 if heads is None:
1203 1205 cg = remote.changegroup(fetch, 'pull')
1204 1206 else:
1205 1207 if not remote.capable('changegroupsubset'):
1206 1208 raise util.Abort(_("Partial pull cannot be done because "
1207 1209 "other repository doesn't support "
1208 1210 "changegroupsubset."))
1209 1211 cg = remote.changegroupsubset(fetch, heads, 'pull')
1210 1212 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1211 1213 finally:
1212 1214 lock.release()
1213 1215
1214 1216 def push(self, remote, force=False, revs=None, newbranch=False):
1215 1217 '''Push outgoing changesets (limited by revs) from the current
1216 1218 repository to remote. Return an integer:
1217 1219 - 0 means HTTP error *or* nothing to push
1218 1220 - 1 means we pushed and remote head count is unchanged *or*
1219 1221 we have outgoing changesets but refused to push
1220 1222 - other values as described by addchangegroup()
1221 1223 '''
1222 1224 # there are two ways to push to remote repo:
1223 1225 #
1224 1226 # addchangegroup assumes local user can lock remote
1225 1227 # repo (local filesystem, old ssh servers).
1226 1228 #
1227 1229 # unbundle assumes local user cannot lock remote repo (new ssh
1228 1230 # servers, http servers).
1229 1231
1230 1232 lock = None
1231 1233 unbundle = remote.capable('unbundle')
1232 1234 if not unbundle:
1233 1235 lock = remote.lock()
1234 1236 try:
1235 1237 ret = discovery.prepush(self, remote, force, revs, newbranch)
1236 1238 if ret[0] is None:
1237 1239 # and here we return 0 for "nothing to push" or 1 for
1238 1240 # "something to push but I refuse"
1239 1241 return ret[1]
1240 1242
1241 1243 cg, remote_heads = ret
1242 1244 if unbundle:
1243 1245 # local repo finds heads on server, finds out what revs it must
1244 1246 # push. once revs transferred, if server finds it has
1245 1247 # different heads (someone else won commit/push race), server
1246 1248 # aborts.
1247 1249 if force:
1248 1250 remote_heads = ['force']
1249 1251 # ssh: return remote's addchangegroup()
1250 1252 # http: return remote's addchangegroup() or 0 for error
1251 1253 return remote.unbundle(cg, remote_heads, 'push')
1252 1254 else:
1253 1255 # we return an integer indicating remote head count change
1254 1256 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1255 1257 finally:
1256 1258 if lock is not None:
1257 1259 lock.release()
1258 1260
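# Reading push()'s return value per the docstring above ('remote' assumed):
ret = repo.push(remote)
# ret == 0:  HTTP error or nothing to push
# ret == 1:  pushed with remote head count unchanged, or push was refused
# otherwise: remote head count change as described by addchangegroup()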
1259 1261 def changegroupinfo(self, nodes, source):
1260 1262 if self.ui.verbose or source == 'bundle':
1261 1263 self.ui.status(_("%d changesets found\n") % len(nodes))
1262 1264 if self.ui.debugflag:
1263 1265 self.ui.debug("list of changesets:\n")
1264 1266 for node in nodes:
1265 1267 self.ui.debug("%s\n" % hex(node))
1266 1268
1267 1269 def changegroupsubset(self, bases, heads, source, extranodes=None):
1268 1270 """Compute a changegroup consisting of all the nodes that are
1269 1271 descendants of any of the bases and ancestors of any of the heads.
1270 1272 Return a chunkbuffer object whose read() method will return
1271 1273 successive changegroup chunks.
1272 1274
1273 1275 It is fairly complex as determining which filenodes and which
1274 1276 manifest nodes need to be included for the changeset to be complete
1275 1277 is non-trivial.
1276 1278
1277 1279 Another wrinkle is doing the reverse, figuring out which changeset in
1278 1280 the changegroup a particular filenode or manifestnode belongs to.
1279 1281
1280 1282 The caller can specify some nodes that must be included in the
1281 1283 changegroup using the extranodes argument. It should be a dict
1282 1284 where the keys are the filenames (or 1 for the manifest), and the
1283 1285 values are lists of (node, linknode) tuples, where node is a wanted
1284 1286 node and linknode is the changelog node that should be transmitted as
1285 1287 the linkrev.
1286 1288 """
1287 1289
1288 1290 # Set up some initial variables
1289 1291 # Make it easy to refer to self.changelog
1290 1292 cl = self.changelog
1291 1293 # Compute the list of changesets in this changegroup.
1292 1294 # Some bases may turn out to be superfluous, and some heads may be
1293 1295 # too. nodesbetween will return the minimal set of bases and heads
1294 1296 # necessary to re-create the changegroup.
1295 1297 if not bases:
1296 1298 bases = [nullid]
1297 1299 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1298 1300
1299 1301 if extranodes is None:
1300 1302 # can we go through the fast path ?
1301 1303 heads.sort()
1302 1304 allheads = self.heads()
1303 1305 allheads.sort()
1304 1306 if heads == allheads:
1305 1307 return self._changegroup(msng_cl_lst, source)
1306 1308
1307 1309 # slow path
1308 1310 self.hook('preoutgoing', throw=True, source=source)
1309 1311
1310 1312 self.changegroupinfo(msng_cl_lst, source)
1311 1313
1312 1314 # We assume that all ancestors of bases are known
1313 1315 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1314 1316
1315 1317 # Make it easy to refer to self.manifest
1316 1318 mnfst = self.manifest
1317 1319 # We don't know which manifests are missing yet
1318 1320 msng_mnfst_set = {}
1319 1321 # Nor do we know which filenodes are missing.
1320 1322 msng_filenode_set = {}
1321 1323
1322 1324 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1323 1325 junk = None
1324 1326
1325 1327 # A changeset always belongs to itself, so the changenode lookup
1326 1328 # function for a changenode is identity.
1327 1329 def identity(x):
1328 1330 return x
1329 1331
1330 1332 # A function generating function that sets up the initial environment
1331 1333 # for the inner function.
1332 1334 def filenode_collector(changedfiles):
1333 1335 # This gathers information from each manifestnode included in the
1334 1336 # changegroup about which filenodes the manifest node references
1335 1337 # so we can include those in the changegroup too.
1336 1338 #
1337 1339 # It also remembers which changenode each filenode belongs to. It
1338 1340 # does this by assuming that a filenode belongs to the changenode
1339 1341 # that the first manifest referencing it belongs to.
1340 1342 def collect_msng_filenodes(mnfstnode):
1341 1343 r = mnfst.rev(mnfstnode)
1342 1344 if r - 1 in mnfst.parentrevs(r):
1343 1345 # If the previous rev is one of the parents,
1344 1346 # we only need to see a diff.
1345 1347 deltamf = mnfst.readdelta(mnfstnode)
1346 1348 # For each line in the delta
1347 1349 for f, fnode in deltamf.iteritems():
1348 1350 # And if the file is in the list of files we care
1349 1351 # about.
1350 1352 if f in changedfiles:
1351 1353 # Get the changenode this manifest belongs to
1352 1354 clnode = msng_mnfst_set[mnfstnode]
1353 1355 # Create the set of filenodes for the file if
1354 1356 # there isn't one already.
1355 1357 ndset = msng_filenode_set.setdefault(f, {})
1356 1358 # And set the filenode's changelog node to the
1357 1359 # manifest's if it hasn't been set already.
1358 1360 ndset.setdefault(fnode, clnode)
1359 1361 else:
1360 1362 # Otherwise we need a full manifest.
1361 1363 m = mnfst.read(mnfstnode)
1362 1364 # For every file we care about.
1363 1365 for f in changedfiles:
1364 1366 fnode = m.get(f, None)
1365 1367 # If it's in the manifest
1366 1368 if fnode is not None:
1367 1369 # See comments above.
1368 1370 clnode = msng_mnfst_set[mnfstnode]
1369 1371 ndset = msng_filenode_set.setdefault(f, {})
1370 1372 ndset.setdefault(fnode, clnode)
1371 1373 return collect_msng_filenodes
1372 1374
1373 1375 # If we determine that a particular file or manifest node must be a
1374 1376 # node that the recipient of the changegroup will already have, we can
1375 1377 # also assume the recipient will have all the parents. This function
1376 1378 # prunes them from the set of missing nodes.
1377 1379 def prune(revlog, missingnodes):
1378 1380 hasset = set()
1379 1381 # If a 'missing' filenode thinks it belongs to a changenode we
1380 1382 # assume the recipient must have, then the recipient must have
1381 1383 # that filenode.
1382 1384 for n in missingnodes:
1383 1385 clrev = revlog.linkrev(revlog.rev(n))
1384 1386 if clrev in commonrevs:
1385 1387 hasset.add(n)
1386 1388 for n in hasset:
1387 1389 missingnodes.pop(n, None)
1388 1390 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1389 1391 missingnodes.pop(revlog.node(r), None)
1390 1392
1391 1393 # Add the nodes that were explicitly requested.
1392 1394 def add_extra_nodes(name, nodes):
1393 1395 if not extranodes or name not in extranodes:
1394 1396 return
1395 1397
1396 1398 for node, linknode in extranodes[name]:
1397 1399 if node not in nodes:
1398 1400 nodes[node] = linknode
1399 1401
1400 1402 # Now that we have all theses utility functions to help out and
1401 1403 # logically divide up the task, generate the group.
1402 1404 def gengroup():
1403 1405 # The set of changed files starts empty.
1404 1406 changedfiles = set()
1405 1407 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1406 1408
1407 1409 # Create a changenode group generator that will call our functions
1408 1410 # back to lookup the owning changenode and collect information.
1409 1411 group = cl.group(msng_cl_lst, identity, collect)
1410 1412 for cnt, chnk in enumerate(group):
1411 1413 yield chnk
1412 1414 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1413 1415 self.ui.progress(_('bundling changes'), None)
1414 1416
1415 1417 prune(mnfst, msng_mnfst_set)
1416 1418 add_extra_nodes(1, msng_mnfst_set)
1417 1419 msng_mnfst_lst = msng_mnfst_set.keys()
1418 1420 # Sort the manifestnodes by revision number.
1419 1421 msng_mnfst_lst.sort(key=mnfst.rev)
1420 1422 # Create a generator for the manifestnodes that calls our lookup
1421 1423 # and data collection functions back.
1422 1424 group = mnfst.group(msng_mnfst_lst,
1423 1425 lambda mnode: msng_mnfst_set[mnode],
1424 1426 filenode_collector(changedfiles))
1425 1427 for cnt, chnk in enumerate(group):
1426 1428 yield chnk
1427 1429 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1428 1430 self.ui.progress(_('bundling manifests'), None)
1429 1431
1430 1432 # These are no longer needed, dereference and toss the memory for
1431 1433 # them.
1432 1434 msng_mnfst_lst = None
1433 1435 msng_mnfst_set.clear()
1434 1436
1435 1437 if extranodes:
1436 1438 for fname in extranodes:
1437 1439 if isinstance(fname, int):
1438 1440 continue
1439 1441 msng_filenode_set.setdefault(fname, {})
1440 1442 changedfiles.add(fname)
1441 1443 # Go through all our files in order sorted by name.
1442 1444 cnt = 0
1443 1445 for fname in sorted(changedfiles):
1444 1446 filerevlog = self.file(fname)
1445 1447 if not len(filerevlog):
1446 1448 raise util.Abort(_("empty or missing revlog for %s") % fname)
1447 1449 # Toss out the filenodes that the recipient isn't really
1448 1450 # missing.
1449 1451 missingfnodes = msng_filenode_set.pop(fname, {})
1450 1452 prune(filerevlog, missingfnodes)
1451 1453 add_extra_nodes(fname, missingfnodes)
1452 1454 # If any filenodes are left, generate the group for them,
1453 1455 # otherwise don't bother.
1454 1456 if missingfnodes:
1455 1457 yield changegroup.chunkheader(len(fname))
1456 1458 yield fname
1457 1459 # Sort the filenodes by their revision # (topological order)
1458 1460 nodeiter = list(missingfnodes)
1459 1461 nodeiter.sort(key=filerevlog.rev)
1460 1462 # Create a group generator and only pass in a changenode
1461 1463 # lookup function as we need to collect no information
1462 1464 # from filenodes.
1463 1465 group = filerevlog.group(nodeiter,
1464 1466 lambda fnode: missingfnodes[fnode])
1465 1467 for chnk in group:
1466 1468 self.ui.progress(
1467 1469 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1468 1470 cnt += 1
1469 1471 yield chnk
1470 1472 # Signal that no more groups are left.
1471 1473 yield changegroup.closechunk()
1472 1474 self.ui.progress(_('bundling files'), None)
1473 1475
1474 1476 if msng_cl_lst:
1475 1477 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1476 1478
1477 1479 return util.chunkbuffer(gengroup())
1478 1480
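# Shape of the extranodes argument described in the docstring above; every
# variable here is a placeholder for a real node or filename.
extranodes = {
    'path/to/file.txt': [(filenode, linknode)],  # wanted filelog nodes
    1: [(manifestnode, linknode)],               # the key 1 means the manifest
}
cg = repo.changegroupsubset(bases, heads, 'push', extranodes=extranodes)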
1479 1481 def changegroup(self, basenodes, source):
1480 1482 # to avoid a race we use changegroupsubset() (issue1320)
1481 1483 return self.changegroupsubset(basenodes, self.heads(), source)
1482 1484
1483 1485 def _changegroup(self, nodes, source):
1484 1486 """Compute the changegroup of all nodes that we have that a recipient
1485 1487 doesn't. Return a chunkbuffer object whose read() method will return
1486 1488 successive changegroup chunks.
1487 1489
1488 1490 This is much easier than the previous function as we can assume that
1489 1491 the recipient has any changenode we aren't sending them.
1490 1492
1491 1493 nodes is the set of nodes to send"""
1492 1494
1493 1495 self.hook('preoutgoing', throw=True, source=source)
1494 1496
1495 1497 cl = self.changelog
1496 1498 revset = set([cl.rev(n) for n in nodes])
1497 1499 self.changegroupinfo(nodes, source)
1498 1500
1499 1501 def identity(x):
1500 1502 return x
1501 1503
1502 1504 def gennodelst(log):
1503 1505 for r in log:
1504 1506 if log.linkrev(r) in revset:
1505 1507 yield log.node(r)
1506 1508
1507 1509 def lookuplinkrev_func(revlog):
1508 1510 def lookuplinkrev(n):
1509 1511 return cl.node(revlog.linkrev(revlog.rev(n)))
1510 1512 return lookuplinkrev
1511 1513
1512 1514 def gengroup():
1513 1515 '''yield a sequence of changegroup chunks (strings)'''
1514 1516 # construct a list of all changed files
1515 1517 changedfiles = set()
1516 1518 mmfs = {}
1517 1519 collect = changegroup.collector(cl, mmfs, changedfiles)
1518 1520
1519 1521 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1520 1522 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1521 1523 yield chnk
1522 1524 self.ui.progress(_('bundling changes'), None)
1523 1525
1524 1526 mnfst = self.manifest
1525 1527 nodeiter = gennodelst(mnfst)
1526 1528 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1527 1529 lookuplinkrev_func(mnfst))):
1528 1530 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1529 1531 yield chnk
1530 1532 self.ui.progress(_('bundling manifests'), None)
1531 1533
1532 1534 cnt = 0
1533 1535 for fname in sorted(changedfiles):
1534 1536 filerevlog = self.file(fname)
1535 1537 if not len(filerevlog):
1536 1538 raise util.Abort(_("empty or missing revlog for %s") % fname)
1537 1539 nodeiter = gennodelst(filerevlog)
1538 1540 nodeiter = list(nodeiter)
1539 1541 if nodeiter:
1540 1542 yield changegroup.chunkheader(len(fname))
1541 1543 yield fname
1542 1544 lookup = lookuplinkrev_func(filerevlog)
1543 1545 for chnk in filerevlog.group(nodeiter, lookup):
1544 1546 self.ui.progress(
1545 1547 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1546 1548 cnt += 1
1547 1549 yield chnk
1548 1550 self.ui.progress(_('bundling files'), None)
1549 1551
1550 1552 yield changegroup.closechunk()
1551 1553
1552 1554 if nodes:
1553 1555 self.hook('outgoing', node=hex(nodes[0]), source=source)
1554 1556
1555 1557 return util.chunkbuffer(gengroup())
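Both changegroupsubset() and _changegroup() hand back a util.chunkbuffer wrapped around gengroup(), whose yielded strings form the stream: the changelog group, then the manifest group, then a header/name/group triple for each changed file, with an empty close chunk terminating the file list. A minimal consumer sketch, assuming an existing localrepository `repo`; the helper name, the source string 'bundle', and the node list are illustrative:

def write_changegroup(repo, nodes, path):
    # _changegroup returns a util.chunkbuffer: read(n) hands back up
    # to n bytes of successive changegroup chunks, then '' at the end
    cg = repo._changegroup(nodes, 'bundle')
    out = open(path, 'wb')
    try:
        while True:
            data = cg.read(4096)
            if not data:
                break
            out.write(data)
    finally:
        out.close()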
1556 1558
1557 1559 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1558 1560 """Add the changegroup returned by source.read() to this repo.
1559 1561 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1560 1562 the URL of the repo where this changegroup is coming from.
1561 1563
1562 1564 Return an integer summarizing the change to this repo:
1563 1565 - nothing changed or no source: 0
1564 1566 - more heads than before: 1+added heads (2..n)
1565 1567 - fewer heads than before: -1-removed heads (-2..-n)
1566 1568 - number of heads stays the same: 1
1567 1569 """
1568 1570 def csmap(x):
1569 1571 self.ui.debug("add changeset %s\n" % short(x))
1570 1572 return len(cl)
1571 1573
1572 1574 def revmap(x):
1573 1575 return cl.rev(x)
1574 1576
1575 1577 if not source:
1576 1578 return 0
1577 1579
1578 1580 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1579 1581
1580 1582 changesets = files = revisions = 0
1581 1583 efiles = set()
1582 1584
1583 1585 # write changelog data to temp files so concurrent readers will not see
1584 1586 # inconsistent view
1585 1587 cl = self.changelog
1586 1588 cl.delayupdate()
1587 1589 oldheads = len(cl.heads())
1588 1590
1589 1591 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1590 1592 try:
1591 1593 trp = weakref.proxy(tr)
1592 1594 # pull off the changeset group
1593 1595 self.ui.status(_("adding changesets\n"))
1594 1596 clstart = len(cl)
1595 1597 class prog(object):
1596 1598 step = _('changesets')
1597 1599 count = 1
1598 1600 ui = self.ui
1599 1601 total = None
1600 1602 def __call__(self):
1601 1603 self.ui.progress(self.step, self.count, unit=_('chunks'),
1602 1604 total=self.total)
1603 1605 self.count += 1
1604 1606 pr = prog()
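# illustration: pr is the progress callback handed to chunkiter()
# below; every invocation redraws the current progress bar (the
# topic is held in pr.step) and advances the running chunk count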
1605 1607 chunkiter = changegroup.chunkiter(source, progress=pr)
1606 1608 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1607 1609 raise util.Abort(_("received changelog group is empty"))
1608 1610 clend = len(cl)
1609 1611 changesets = clend - clstart
1610 1612 for c in xrange(clstart, clend):
1611 1613 efiles.update(self[c].files())
1612 1614 efiles = len(efiles)
1613 1615 self.ui.progress(_('changesets'), None)
1614 1616
1615 1617 # pull off the manifest group
1616 1618 self.ui.status(_("adding manifests\n"))
1617 1619 pr.step = _('manifests')
1618 1620 pr.count = 1
1619 1621 pr.total = changesets # manifests <= changesets
1620 1622 chunkiter = changegroup.chunkiter(source, progress=pr)
1621 1623 # no need to check for an empty manifest group here:
1622 1624 # if the result of merging revisions 1 and 2 is identical in
1623 1625 # changesets 3 and 4, no new manifest node is created and the
1624 1626 # manifest group will legitimately be empty during the pull
1625 1627 self.manifest.addgroup(chunkiter, revmap, trp)
1626 1628 self.ui.progress(_('manifests'), None)
1627 1629
1628 1630 needfiles = {}
1629 1631 if self.ui.configbool('server', 'validate', default=False):
1630 1632 # validate incoming csets have their manifests
1631 1633 for cset in xrange(clstart, clend):
1632 1634 mfest = self.changelog.read(self.changelog.node(cset))[0]
1633 1635 mfest = self.manifest.readdelta(mfest)
1634 1636 # store file nodes we must see
1635 1637 for f, n in mfest.iteritems():
1636 1638 needfiles.setdefault(f, set()).add(n)
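# illustration: needfiles now maps each filename to the set of
# filelog nodes the incoming manifests reference; the file loop
# below discards nodes as their revisions arrive, and anything
# left over triggers the 'missing file data' abort further down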
1637 1639
1638 1640 # process the files
1639 1641 self.ui.status(_("adding file changes\n"))
1640 1642 pr.step = _('files')
1641 1643 pr.count = 1
1642 1644 pr.total = efiles
1643 1645 while True:
1644 1646 f = changegroup.getchunk(source)
1645 1647 if not f:
1646 1648 break
1647 1649 self.ui.debug("adding %s revisions\n" % f)
1648 1650 pr()
1649 1651 fl = self.file(f)
1650 1652 o = len(fl)
1651 1653 chunkiter = changegroup.chunkiter(source)
1652 1654 if fl.addgroup(chunkiter, revmap, trp) is None:
1653 1655 raise util.Abort(_("received file revlog group is empty"))
1654 1656 revisions += len(fl) - o
1655 1657 files += 1
1656 1658 if f in needfiles:
1657 1659 needs = needfiles[f]
1658 1660 for new in xrange(o, len(fl)):
1659 1661 n = fl.node(new)
1660 1662 if n in needs:
1661 1663 needs.remove(n)
1662 1664 if not needs:
1663 1665 del needfiles[f]
1664 1666 self.ui.progress(_('files'), None)
1665 1667
1666 1668 for f, needs in needfiles.iteritems():
1667 1669 fl = self.file(f)
1668 1670 for n in needs:
1669 1671 try:
1670 1672 fl.rev(n)
1671 1673 except error.LookupError:
1672 1674 raise util.Abort(
1673 1675 _('missing file data for %s:%s - run hg verify') %
1674 1676 (f, hex(n)))
1675 1677
1676 1678 newheads = len(cl.heads())
1677 1679 heads = ""
1678 1680 if oldheads and newheads != oldheads:
1679 1681 heads = _(" (%+d heads)") % (newheads - oldheads)
1680 1682
1681 1683 self.ui.status(_("added %d changesets"
1682 1684 " with %d changes to %d files%s\n")
1683 1685 % (changesets, revisions, files, heads))
1684 1686
1685 1687 if changesets > 0:
1686 1688 p = lambda: cl.writepending() and self.root or ""
1687 1689 self.hook('pretxnchangegroup', throw=True,
1688 1690 node=hex(cl.node(clstart)), source=srctype,
1689 1691 url=url, pending=p)
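# illustration: delayupdate() above buffered the changelog writes;
# writepending() flushes them to a separate pending file so the
# pretxnchangegroup hook can already inspect the incoming
# changesets, and the repo root (or "") tells the hook runner
# whether such pending data exists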
1690 1692
1691 1693 # make changelog see real files again
1692 1694 cl.finalize(trp)
1693 1695
1694 1696 tr.close()
1695 1697 finally:
1696 1698 tr.release()
1697 1699 if lock:
1698 1700 lock.release()
1699 1701
1700 1702 if changesets > 0:
1701 1703 # forcefully update the on-disk branch cache
1702 1704 self.ui.debug("updating the branch cache\n")
1703 self.branchtags()
1705 self.updatebranchcache()
1704 1706 self.hook("changegroup", node=hex(cl.node(clstart)),
1705 1707 source=srctype, url=url)
1706 1708
1707 1709 for i in xrange(clstart, clend):
1708 1710 self.hook("incoming", node=hex(cl.node(i)),
1709 1711 source=srctype, url=url)
1710 1712
1711 1713 # never return 0 here:
1712 1714 if newheads < oldheads:
1713 1715 return newheads - oldheads - 1
1714 1716 else:
1715 1717 return newheads - oldheads + 1
1716 1718
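The integer returned above folds two facts into one value: whether anything was added and how the head count moved. A decoding sketch (a hypothetical helper, not part of this module; only the arithmetic mirrors the code above):

def describe_result(ret):
    # inverse of the head arithmetic at the end of addchangegroup()
    if ret == 0:
        return 'nothing changed (or no source)'
    if ret == 1:
        return 'changes added, head count unchanged'
    if ret > 1:
        return '%d new heads' % (ret - 1)
    return '%d heads removed' % (-ret - 1)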
1717 1719
1718 1720 def stream_in(self, remote):
1719 1721 fp = remote.stream_out()
1720 1722 l = fp.readline()
1721 1723 try:
1722 1724 resp = int(l)
1723 1725 except ValueError:
1724 1726 raise error.ResponseError(
1725 1727 _('Unexpected response from remote server:'), l)
1726 1728 if resp == 1:
1727 1729 raise util.Abort(_('operation forbidden by server'))
1728 1730 elif resp == 2:
1729 1731 raise util.Abort(_('locking the remote repository failed'))
1730 1732 elif resp != 0:
1731 1733 raise util.Abort(_('the server sent an unknown error code'))
1732 1734 self.ui.status(_('streaming all changes\n'))
1733 1735 l = fp.readline()
1734 1736 try:
1735 1737 total_files, total_bytes = map(int, l.split(' ', 1))
1736 1738 except (ValueError, TypeError):
1737 1739 raise error.ResponseError(
1738 1740 _('Unexpected response from remote server:'), l)
1739 1741 self.ui.status(_('%d files to transfer, %s of data\n') %
1740 1742 (total_files, util.bytecount(total_bytes)))
1741 1743 start = time.time()
1742 1744 for i in xrange(total_files):
1743 1745 # XXX doesn't support '\n' or '\r' in filenames
1744 1746 l = fp.readline()
1745 1747 try:
1746 1748 name, size = l.split('\0', 1)
1747 1749 size = int(size)
1748 1750 except (ValueError, TypeError):
1749 1751 raise error.ResponseError(
1750 1752 _('Unexpected response from remote server:'), l)
1751 1753 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1752 1754 # for backwards compat, name was partially encoded
1753 1755 ofp = self.sopener(store.decodedir(name), 'w')
1754 1756 for chunk in util.filechunkiter(fp, limit=size):
1755 1757 ofp.write(chunk)
1756 1758 ofp.close()
1757 1759 elapsed = time.time() - start
1758 1760 if elapsed <= 0:
1759 1761 elapsed = 0.001
1760 1762 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1761 1763 (util.bytecount(total_bytes), elapsed,
1762 1764 util.bytecount(total_bytes / elapsed)))
1763 1765 self.invalidate()
1764 1766 return len(self.heads()) + 1
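For reference, the payload stream_in() expects looks like the sketch below: a status line, a totals line, then for each file a NUL-separated name/size header followed by exactly that many raw bytes. Names and sizes here are made up; real names are store-encoded, which is why decodedir() is applied above:

payload = (
    '0\n'              # status line: 0 means the stream follows
    '2 11\n'           # '<total files> <total bytes>'
    'data/a.i\x006\n'  # per-file header: '<name>\0<size>'
    'abcdef'           # exactly 6 raw bytes of revlog data
    'data/b.i\x005\n'
    '12345'
)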
1765 1767
1766 1768 def clone(self, remote, heads=[], stream=False):
1767 1769 '''clone remote repository.
1768 1770
1769 1771 keyword arguments:
1770 1772 heads: list of revs to clone (forces use of pull)
1771 1773 stream: use streaming clone if possible'''
1772 1774
1773 1775 # now, all clients that can request uncompressed clones can
1774 1776 # read repo formats supported by all servers that can serve
1775 1777 # them.
1776 1778
1777 1779 # if revlog format changes, client will have to check version
1778 1780 # and format flags on "stream" capability, and use
1779 1781 # uncompressed only if compatible.
1780 1782
1781 1783 if stream and not heads and remote.capable('stream'):
1782 1784 return self.stream_in(remote)
1783 1785 return self.pull(remote, heads)
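A sketch of a caller, assuming Mercurial's own package layout for the imports (a hypothetical helper, not part of this module):

from mercurial import hg, ui as uimod

def mirror(srcurl, dest):
    u = uimod.ui()
    remote = hg.repository(u, srcurl)
    local = hg.repository(u, dest, create=1)
    # stream=True short-circuits to stream_in() only when no head
    # subset was requested and the server advertises 'stream';
    # otherwise clone() degrades to a regular pull
    return local.clone(remote, stream=True)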
1784 1786
1785 1787 def pushkey(self, namespace, key, old, new):
1786 1788 return pushkey.push(self, namespace, key, old, new)
1787 1789
1788 1790 def listkeys(self, namespace):
1789 1791 return pushkey.list(self, namespace)
1790 1792
1791 1793 # used to avoid circular references so destructors work
1792 1794 def aftertrans(files):
1793 1795 renamefiles = [tuple(t) for t in files]
1794 1796 def a():
1795 1797 for src, dest in renamefiles:
1796 1798 util.rename(src, dest)
1797 1799 return a
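aftertrans() deliberately copies each pair into a fresh tuple so the returned closure holds no reference back to the transaction or repo, which is what the comment above it alludes to. A sketch of the calling pattern; the helper name and exact journal paths are assumptions, and the real call site (this class's transaction() method) is outside this hunk:

def undo_renames(repo):
    # once a transaction closes, its journal files are renamed to
    # undo files so a later rollback can find them; the closure from
    # aftertrans() performs those renames when invoked
    renames = [(repo.sjoin('journal'), repo.sjoin('undo')),
               (repo.join('journal.dirstate'), repo.join('undo.dirstate'))]
    return aftertrans(renames)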
1798 1800
1799 1801 def instance(ui, path, create):
1800 1802 return localrepository(ui, util.drop_scheme('file', path), create)
1801 1803
1802 1804 def islocal(path):
1803 1805 return True