clfilter: add actual repo filtering mechanism...
Pierre-Yves David
r18100:3a6ddacb default
@@ -0,0 +1,94 b''
1 # repoview.py - Filtered view of a localrepo object
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 import copy
10
11 # function to compute filtered set
12 filtertable = {}
13
14 def filteredrevs(repo, filtername):
15 """returns set of filtered revision for this filter name"""
16 return filtertable[filtername](repo.unfiltered())
17
18 class repoview(object):
19 """Provide a read/write view of a repo through a filtered changelog
20
21 This object is used to access a filtered version of a repository without
22 altering the original repository object itself. We cannot alter the
23 original object for two main reasons:
24 - It prevents the use of a repo with multiple filters at the same time, in
25 particular when multiple threads are involved.
26 - It makes the scope of the filtering harder to control.
27
28 This object behaves very much like the original repository. All attribute
29 operations are done on the original repository:
30 - An access to `repoview.someattr` actually returns `repo.someattr`,
31 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
32 - A deletion of `repoview.someattr` actually drops `someattr`
33 from `repo.__dict__`.
34
35 The only exception is the `changelog` property. It is overridden to return
36 a (surface) copy of `repo.changelog` with some revisions filtered. The
37 `filtername` attribute of the view controls the revisions that need to be
38 filtered. (The fact that the changelog is copied is an implementation detail.)
39
40 Unlike attributes, this object intercepts all method calls. This means that
41 all methods are run on the `repoview` object with the filtered `changelog`
42 property. For this purpose the simple `repoview` class must be mixed with
43 the actual class of the repository. This ensures that the resulting
44 `repoview` object has the very same methods as the repo object. This
45 leads to the property below.
46
47 repoview.method() --> repo.__class__.method(repoview)
48
49 The inheritance has to be done dynamically because `repo` can be any
50 subclass of `localrepo`, e.g. `bundlerepo` or `httprepo`.
51 """
52
53 def __init__(self, repo, filtername):
54 object.__setattr__(self, '_unfilteredrepo', repo)
55 object.__setattr__(self, 'filtername', filtername)
56
57 # not a cached property on purpose; we shall implement a proper cache later
58 @property
59 def changelog(self):
60 """return a filtered version of the changeset
61
62 this changelog must not be used for writing"""
63 # some cache may be implemented later
64 cl = copy.copy(self._unfilteredrepo.changelog)
65 cl.filteredrevs = filteredrevs(self._unfilteredrepo, self.filtername)
66 return cl
67
68 def unfiltered(self):
69 """Return an unfiltered version of a repo"""
70 return self._unfilteredrepo
71
72 def filtered(self, name):
73 """Return a filtered version of a repository"""
74 if name == self.filtername:
75 return self
76 return self.unfiltered().filtered(name)
77
78 # all attribute accesses are forwarded to the proxied repo
79 def __getattr__(self, attr):
80 return getattr(self._unfilteredrepo, attr)
81
82 def __setattr__(self, attr, value):
83 return setattr(self._unfilteredrepo, attr, value)
84
85 def __delattr__(self, attr):
86 return delattr(self._unfilteredrepo, attr)
87
88 # The `requirements` attribute is initialized during __init__. But
89 # __getattr__ won't be called as it also exists on the class. We need
90 # explicit forwarding to the main repo here
91 @property
92 def requirements(self):
93 return self._unfilteredrepo.requirements
94
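A minimal usage sketch (not part of this changeset; the 'example' filter name, the repository path, and the filter function are hypothetical, since this diff leaves `filtertable` empty): a function computing the revisions to hide is registered in `repoview.filtertable`, and a filtered view is then obtained through the new `localrepository.filtered()` method added in the localrepo.py hunk below.

    from mercurial import ui as uimod, hg, repoview

    def hideexample(repo):
        """revisions to hide for the hypothetical 'example' filter"""
        # receives the *unfiltered* repo via repoview.filteredrevs();
        # reuse the query hiddenrevs uses for extinct changesets
        return frozenset(repo.revs('extinct() - ::(. + bookmark())'))

    repoview.filtertable['example'] = hideexample

    repo = hg.repository(uimod.ui(), '.')      # any localrepository subclass
    view = repo.filtered('example')            # dynamic repoview/repo mixin
    assert view.unfiltered() is repo           # original repo left untouched
    assert view.filtered('example') is view    # same filter name returns self
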
@@ -1,2683 +1,2691 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class repofilecache(filecache):
22 22 """All filecache usage on repo are done for logic that should be unfiltered
23 23 """
24 24
25 25 def __get__(self, repo, type=None):
26 26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 27 def __set__(self, repo, value):
28 28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 29 def __delete__(self, repo):
30 30 return super(repofilecache, self).__delete__(repo.unfiltered())
31 31
32 32 class storecache(repofilecache):
33 33 """filecache for files in the store"""
34 34 def join(self, obj, fname):
35 35 return obj.sjoin(fname)
36 36
37 37 class unfilteredpropertycache(propertycache):
38 38 """propertycache that apply to unfiltered repo only"""
39 39
40 40 def __get__(self, repo, type=None):
41 41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 42
43 43 class filteredpropertycache(propertycache):
44 44 """propertycache that must take filtering in account"""
45 45
46 46 def cachevalue(self, obj, value):
47 47 object.__setattr__(obj, self.name, value)
48 48
49 49
50 50 def hasunfilteredcache(repo, name):
51 51 """check if an repo and a unfilteredproperty cached value for <name>"""
52 52 return name in vars(repo.unfiltered())
53 53
54 54 def unfilteredmethod(orig):
55 55 """decorate method that always need to be run on unfiltered version"""
56 56 def wrapper(repo, *args, **kwargs):
57 57 return orig(repo.unfiltered(), *args, **kwargs)
58 58 return wrapper
59 59
60 60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 62
63 63 class localpeer(peer.peerrepository):
64 64 '''peer for a local repo; reflects only the most recent API'''
65 65
66 66 def __init__(self, repo, caps=MODERNCAPS):
67 67 peer.peerrepository.__init__(self)
68 68 self._repo = repo
69 69 self.ui = repo.ui
70 70 self._caps = repo._restrictcapabilities(caps)
71 71 self.requirements = repo.requirements
72 72 self.supportedformats = repo.supportedformats
73 73
74 74 def close(self):
75 75 self._repo.close()
76 76
77 77 def _capabilities(self):
78 78 return self._caps
79 79
80 80 def local(self):
81 81 return self._repo
82 82
83 83 def canpush(self):
84 84 return True
85 85
86 86 def url(self):
87 87 return self._repo.url()
88 88
89 89 def lookup(self, key):
90 90 return self._repo.lookup(key)
91 91
92 92 def branchmap(self):
93 93 return discovery.visiblebranchmap(self._repo)
94 94
95 95 def heads(self):
96 96 return discovery.visibleheads(self._repo)
97 97
98 98 def known(self, nodes):
99 99 return self._repo.known(nodes)
100 100
101 101 def getbundle(self, source, heads=None, common=None):
102 102 return self._repo.getbundle(source, heads=heads, common=common)
103 103
104 104 # TODO We might want to move the next two calls into legacypeer and add
105 105 # unbundle instead.
106 106
107 107 def lock(self):
108 108 return self._repo.lock()
109 109
110 110 def addchangegroup(self, cg, source, url):
111 111 return self._repo.addchangegroup(cg, source, url)
112 112
113 113 def pushkey(self, namespace, key, old, new):
114 114 return self._repo.pushkey(namespace, key, old, new)
115 115
116 116 def listkeys(self, namespace):
117 117 return self._repo.listkeys(namespace)
118 118
119 119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 120 '''used to test argument passing over the wire'''
121 121 return "%s %s %s %s %s" % (one, two, three, four, five)
122 122
123 123 class locallegacypeer(localpeer):
124 124 '''peer extension which implements legacy methods too; used for tests with
125 125 restricted capabilities'''
126 126
127 127 def __init__(self, repo):
128 128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 129
130 130 def branches(self, nodes):
131 131 return self._repo.branches(nodes)
132 132
133 133 def between(self, pairs):
134 134 return self._repo.between(pairs)
135 135
136 136 def changegroup(self, basenodes, source):
137 137 return self._repo.changegroup(basenodes, source)
138 138
139 139 def changegroupsubset(self, bases, heads, source):
140 140 return self._repo.changegroupsubset(bases, heads, source)
141 141
142 142 class localrepository(object):
143 143
144 144 supportedformats = set(('revlogv1', 'generaldelta'))
145 145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 146 'dotencode'))
147 147 openerreqs = set(('revlogv1', 'generaldelta'))
148 148 requirements = ['revlogv1']
149 149
150 150 def _baserequirements(self, create):
151 151 return self.requirements[:]
152 152
153 153 def __init__(self, baseui, path=None, create=False):
154 154 self.wvfs = scmutil.vfs(path, expand=True)
155 155 self.wopener = self.wvfs
156 156 self.root = self.wvfs.base
157 157 self.path = self.wvfs.join(".hg")
158 158 self.origroot = path
159 159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 160 self.vfs = scmutil.vfs(self.path)
161 161 self.opener = self.vfs
162 162 self.baseui = baseui
163 163 self.ui = baseui.copy()
164 164 # A list of callbacks to shape the phase if no data were found.
165 165 # Callbacks are in the form: func(repo, roots) --> processed root.
166 166 # This list is to be filled by extensions during repo setup
167 167 self._phasedefaults = []
168 168 try:
169 169 self.ui.readconfig(self.join("hgrc"), self.root)
170 170 extensions.loadall(self.ui)
171 171 except IOError:
172 172 pass
173 173
174 174 if not self.vfs.isdir():
175 175 if create:
176 176 if not self.wvfs.exists():
177 177 self.wvfs.makedirs()
178 178 self.vfs.makedir(notindexed=True)
179 179 requirements = self._baserequirements(create)
180 180 if self.ui.configbool('format', 'usestore', True):
181 181 self.vfs.mkdir("store")
182 182 requirements.append("store")
183 183 if self.ui.configbool('format', 'usefncache', True):
184 184 requirements.append("fncache")
185 185 if self.ui.configbool('format', 'dotencode', True):
186 186 requirements.append('dotencode')
187 187 # create an invalid changelog
188 188 self.vfs.append(
189 189 "00changelog.i",
190 190 '\0\0\0\2' # represents revlogv2
191 191 ' dummy changelog to prevent using the old repo layout'
192 192 )
193 193 if self.ui.configbool('format', 'generaldelta', False):
194 194 requirements.append("generaldelta")
195 195 requirements = set(requirements)
196 196 else:
197 197 raise error.RepoError(_("repository %s not found") % path)
198 198 elif create:
199 199 raise error.RepoError(_("repository %s already exists") % path)
200 200 else:
201 201 try:
202 202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 203 except IOError, inst:
204 204 if inst.errno != errno.ENOENT:
205 205 raise
206 206 requirements = set()
207 207
208 208 self.sharedpath = self.path
209 209 try:
210 210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 211 if not os.path.exists(s):
212 212 raise error.RepoError(
213 213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 214 self.sharedpath = s
215 215 except IOError, inst:
216 216 if inst.errno != errno.ENOENT:
217 217 raise
218 218
219 219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 220 self.spath = self.store.path
221 221 self.svfs = self.store.vfs
222 222 self.sopener = self.svfs
223 223 self.sjoin = self.store.join
224 224 self.vfs.createmode = self.store.createmode
225 225 self._applyrequirements(requirements)
226 226 if create:
227 227 self._writerequirements()
228 228
229 229
230 230 self._branchcache = None
231 231 self._branchcachetip = None
232 232 self.filterpats = {}
233 233 self._datafilters = {}
234 234 self._transref = self._lockref = self._wlockref = None
235 235
236 236 # A cache for various files under .hg/ that tracks file changes,
237 237 # (used by the filecache decorator)
238 238 #
239 239 # Maps a property name to its util.filecacheentry
240 240 self._filecache = {}
241 241
242 242 def close(self):
243 243 pass
244 244
245 245 def _restrictcapabilities(self, caps):
246 246 return caps
247 247
248 248 def _applyrequirements(self, requirements):
249 249 self.requirements = requirements
250 250 self.sopener.options = dict((r, 1) for r in requirements
251 251 if r in self.openerreqs)
252 252
253 253 def _writerequirements(self):
254 254 reqfile = self.opener("requires", "w")
255 255 for r in self.requirements:
256 256 reqfile.write("%s\n" % r)
257 257 reqfile.close()
258 258
259 259 def _checknested(self, path):
260 260 """Determine if path is a legal nested repository."""
261 261 if not path.startswith(self.root):
262 262 return False
263 263 subpath = path[len(self.root) + 1:]
264 264 normsubpath = util.pconvert(subpath)
265 265
266 266 # XXX: Checking against the current working copy is wrong in
267 267 # the sense that it can reject things like
268 268 #
269 269 # $ hg cat -r 10 sub/x.txt
270 270 #
271 271 # if sub/ is no longer a subrepository in the working copy
272 272 # parent revision.
273 273 #
274 274 # However, it can of course also allow things that would have
275 275 # been rejected before, such as the above cat command if sub/
276 276 # is a subrepository now, but was a normal directory before.
277 277 # The old path auditor would have rejected by mistake since it
278 278 # panics when it sees sub/.hg/.
279 279 #
280 280 # All in all, checking against the working copy seems sensible
281 281 # since we want to prevent access to nested repositories on
282 282 # the filesystem *now*.
283 283 ctx = self[None]
284 284 parts = util.splitpath(subpath)
285 285 while parts:
286 286 prefix = '/'.join(parts)
287 287 if prefix in ctx.substate:
288 288 if prefix == normsubpath:
289 289 return True
290 290 else:
291 291 sub = ctx.sub(prefix)
292 292 return sub.checknested(subpath[len(prefix) + 1:])
293 293 else:
294 294 parts.pop()
295 295 return False
296 296
297 297 def peer(self):
298 298 return localpeer(self) # not cached to avoid reference cycle
299 299
300 300 def unfiltered(self):
301 301 """Return unfiltered version of the repository
302 302
303 303 Intended to be overwritten by filtered repo."""
304 304 return self
305 305
306 def filtered(self, name):
307 """Return a filtered version of a repository"""
308 # build a new class with the mixin and the current class
309 # (possibly a subclass of the repo)
310 class proxycls(repoview.repoview, self.unfiltered().__class__):
311 pass
312 return proxycls(self, name)
313
306 314 @repofilecache('bookmarks')
307 315 def _bookmarks(self):
308 316 return bookmarks.bmstore(self)
309 317
310 318 @repofilecache('bookmarks.current')
311 319 def _bookmarkcurrent(self):
312 320 return bookmarks.readcurrent(self)
313 321
314 322 def bookmarkheads(self, bookmark):
315 323 name = bookmark.split('@', 1)[0]
316 324 heads = []
317 325 for mark, n in self._bookmarks.iteritems():
318 326 if mark.split('@', 1)[0] == name:
319 327 heads.append(n)
320 328 return heads
321 329
322 330 @storecache('phaseroots')
323 331 def _phasecache(self):
324 332 return phases.phasecache(self, self._phasedefaults)
325 333
326 334 @storecache('obsstore')
327 335 def obsstore(self):
328 336 store = obsolete.obsstore(self.sopener)
329 337 if store and not obsolete._enabled:
330 338 # message is rare enough to not be translated
331 339 msg = 'obsolete feature not enabled but %i markers found!\n'
332 340 self.ui.warn(msg % len(list(store)))
333 341 return store
334 342
335 343 @unfilteredpropertycache
336 344 def hiddenrevs(self):
337 345 """hiddenrevs: revs that should be hidden by command and tools
338 346
339 347 This set is carried on the repo to ease initialization and lazy
340 348 loading; it'll probably move back to changelog for efficiency and
341 349 consistency reasons.
342 350
343 351 Note that the hiddenrevs will need invalidation when
344 352 - a new changeset is added (possibly unstable above extinct)
345 353 - a new obsolete marker is added (possibly a new extinct changeset)
346 354
347 355 hidden changesets cannot have non-hidden descendants
348 356 """
349 357 hidden = set()
350 358 if self.obsstore:
351 359 ### hide extinct changesets that are not accessible by any means
352 360 hiddenquery = 'extinct() - ::(. + bookmark())'
353 361 hidden.update(self.revs(hiddenquery))
354 362 return hidden
355 363
356 364 @storecache('00changelog.i')
357 365 def changelog(self):
358 366 c = changelog.changelog(self.sopener)
359 367 if 'HG_PENDING' in os.environ:
360 368 p = os.environ['HG_PENDING']
361 369 if p.startswith(self.root):
362 370 c.readpending('00changelog.i.a')
363 371 return c
364 372
365 373 @storecache('00manifest.i')
366 374 def manifest(self):
367 375 return manifest.manifest(self.sopener)
368 376
369 377 @repofilecache('dirstate')
370 378 def dirstate(self):
371 379 warned = [0]
372 380 def validate(node):
373 381 try:
374 382 self.changelog.rev(node)
375 383 return node
376 384 except error.LookupError:
377 385 if not warned[0]:
378 386 warned[0] = True
379 387 self.ui.warn(_("warning: ignoring unknown"
380 388 " working parent %s!\n") % short(node))
381 389 return nullid
382 390
383 391 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 392
385 393 def __getitem__(self, changeid):
386 394 if changeid is None:
387 395 return context.workingctx(self)
388 396 return context.changectx(self, changeid)
389 397
390 398 def __contains__(self, changeid):
391 399 try:
392 400 return bool(self.lookup(changeid))
393 401 except error.RepoLookupError:
394 402 return False
395 403
396 404 def __nonzero__(self):
397 405 return True
398 406
399 407 def __len__(self):
400 408 return len(self.changelog)
401 409
402 410 def __iter__(self):
403 411 return iter(self.changelog)
404 412
405 413 def revs(self, expr, *args):
406 414 '''Return a list of revisions matching the given revset'''
407 415 expr = revset.formatspec(expr, *args)
408 416 m = revset.match(None, expr)
409 417 return [r for r in m(self, list(self))]
410 418
411 419 def set(self, expr, *args):
412 420 '''
413 421 Yield a context for each matching revision, after doing arg
414 422 replacement via revset.formatspec
415 423 '''
416 424 for r in self.revs(expr, *args):
417 425 yield self[r]
418 426
419 427 def url(self):
420 428 return 'file:' + self.root
421 429
422 430 def hook(self, name, throw=False, **args):
423 431 return hook.hook(self.ui, self, name, throw, **args)
424 432
425 433 @unfilteredmethod
426 434 def _tag(self, names, node, message, local, user, date, extra={}):
427 435 if isinstance(names, str):
428 436 names = (names,)
429 437
430 438 branches = self.branchmap()
431 439 for name in names:
432 440 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 441 local=local)
434 442 if name in branches:
435 443 self.ui.warn(_("warning: tag %s conflicts with existing"
436 444 " branch name\n") % name)
437 445
438 446 def writetags(fp, names, munge, prevtags):
439 447 fp.seek(0, 2)
440 448 if prevtags and prevtags[-1] != '\n':
441 449 fp.write('\n')
442 450 for name in names:
443 451 m = munge and munge(name) or name
444 452 if (self._tagscache.tagtypes and
445 453 name in self._tagscache.tagtypes):
446 454 old = self.tags().get(name, nullid)
447 455 fp.write('%s %s\n' % (hex(old), m))
448 456 fp.write('%s %s\n' % (hex(node), m))
449 457 fp.close()
450 458
451 459 prevtags = ''
452 460 if local:
453 461 try:
454 462 fp = self.opener('localtags', 'r+')
455 463 except IOError:
456 464 fp = self.opener('localtags', 'a')
457 465 else:
458 466 prevtags = fp.read()
459 467
460 468 # local tags are stored in the current charset
461 469 writetags(fp, names, None, prevtags)
462 470 for name in names:
463 471 self.hook('tag', node=hex(node), tag=name, local=local)
464 472 return
465 473
466 474 try:
467 475 fp = self.wfile('.hgtags', 'rb+')
468 476 except IOError, e:
469 477 if e.errno != errno.ENOENT:
470 478 raise
471 479 fp = self.wfile('.hgtags', 'ab')
472 480 else:
473 481 prevtags = fp.read()
474 482
475 483 # committed tags are stored in UTF-8
476 484 writetags(fp, names, encoding.fromlocal, prevtags)
477 485
478 486 fp.close()
479 487
480 488 self.invalidatecaches()
481 489
482 490 if '.hgtags' not in self.dirstate:
483 491 self[None].add(['.hgtags'])
484 492
485 493 m = matchmod.exact(self.root, '', ['.hgtags'])
486 494 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 495
488 496 for name in names:
489 497 self.hook('tag', node=hex(node), tag=name, local=local)
490 498
491 499 return tagnode
492 500
493 501 def tag(self, names, node, message, local, user, date):
494 502 '''tag a revision with one or more symbolic names.
495 503
496 504 names is a list of strings or, when adding a single tag, names may be a
497 505 string.
498 506
499 507 if local is True, the tags are stored in a per-repository file.
500 508 otherwise, they are stored in the .hgtags file, and a new
501 509 changeset is committed with the change.
502 510
503 511 keyword arguments:
504 512
505 513 local: whether to store tags in non-version-controlled file
506 514 (default False)
507 515
508 516 message: commit message to use if committing
509 517
510 518 user: name of user to use if committing
511 519
512 520 date: date tuple to use if committing'''
513 521
514 522 if not local:
515 523 for x in self.status()[:5]:
516 524 if '.hgtags' in x:
517 525 raise util.Abort(_('working copy of .hgtags is changed '
518 526 '(please commit .hgtags manually)'))
519 527
520 528 self.tags() # instantiate the cache
521 529 self._tag(names, node, message, local, user, date)
522 530
523 531 @filteredpropertycache
524 532 def _tagscache(self):
525 533 '''Returns a tagscache object that contains various tags related
526 534 caches.'''
527 535
528 536 # This simplifies its cache management by having one decorated
529 537 # function (this one) and the rest simply fetch things from it.
530 538 class tagscache(object):
531 539 def __init__(self):
532 540 # These two define the set of tags for this repository. tags
533 541 # maps tag name to node; tagtypes maps tag name to 'global' or
534 542 # 'local'. (Global tags are defined by .hgtags across all
535 543 # heads, and local tags are defined in .hg/localtags.)
536 544 # They constitute the in-memory cache of tags.
537 545 self.tags = self.tagtypes = None
538 546
539 547 self.nodetagscache = self.tagslist = None
540 548
541 549 cache = tagscache()
542 550 cache.tags, cache.tagtypes = self._findtags()
543 551
544 552 return cache
545 553
546 554 def tags(self):
547 555 '''return a mapping of tag to node'''
548 556 t = {}
549 557 if self.changelog.filteredrevs:
550 558 tags, tt = self._findtags()
551 559 else:
552 560 tags = self._tagscache.tags
553 561 for k, v in tags.iteritems():
554 562 try:
555 563 # ignore tags to unknown nodes
556 564 self.changelog.rev(v)
557 565 t[k] = v
558 566 except (error.LookupError, ValueError):
559 567 pass
560 568 return t
561 569
562 570 def _findtags(self):
563 571 '''Do the hard work of finding tags. Return a pair of dicts
564 572 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 573 maps tag name to a string like \'global\' or \'local\'.
566 574 Subclasses or extensions are free to add their own tags, but
567 575 should be aware that the returned dicts will be retained for the
568 576 duration of the localrepo object.'''
569 577
570 578 # XXX what tagtype should subclasses/extensions use? Currently
571 579 # mq and bookmarks add tags, but do not set the tagtype at all.
572 580 # Should each extension invent its own tag type? Should there
573 581 # be one tagtype for all such "virtual" tags? Or is the status
574 582 # quo fine?
575 583
576 584 alltags = {} # map tag name to (node, hist)
577 585 tagtypes = {}
578 586
579 587 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 588 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 589
582 590 # Build the return dicts. Have to re-encode tag names because
583 591 # the tags module always uses UTF-8 (in order not to lose info
584 592 # writing to the cache), but the rest of Mercurial wants them in
585 593 # local encoding.
586 594 tags = {}
587 595 for (name, (node, hist)) in alltags.iteritems():
588 596 if node != nullid:
589 597 tags[encoding.tolocal(name)] = node
590 598 tags['tip'] = self.changelog.tip()
591 599 tagtypes = dict([(encoding.tolocal(name), value)
592 600 for (name, value) in tagtypes.iteritems()])
593 601 return (tags, tagtypes)
594 602
595 603 def tagtype(self, tagname):
596 604 '''
597 605 return the type of the given tag. result can be:
598 606
599 607 'local' : a local tag
600 608 'global' : a global tag
601 609 None : tag does not exist
602 610 '''
603 611
604 612 return self._tagscache.tagtypes.get(tagname)
605 613
606 614 def tagslist(self):
607 615 '''return a list of tags ordered by revision'''
608 616 if not self._tagscache.tagslist:
609 617 l = []
610 618 for t, n in self.tags().iteritems():
611 619 r = self.changelog.rev(n)
612 620 l.append((r, t, n))
613 621 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 622
615 623 return self._tagscache.tagslist
616 624
617 625 def nodetags(self, node):
618 626 '''return the tags associated with a node'''
619 627 if not self._tagscache.nodetagscache:
620 628 nodetagscache = {}
621 629 for t, n in self._tagscache.tags.iteritems():
622 630 nodetagscache.setdefault(n, []).append(t)
623 631 for tags in nodetagscache.itervalues():
624 632 tags.sort()
625 633 self._tagscache.nodetagscache = nodetagscache
626 634 return self._tagscache.nodetagscache.get(node, [])
627 635
628 636 def nodebookmarks(self, node):
629 637 marks = []
630 638 for bookmark, n in self._bookmarks.iteritems():
631 639 if n == node:
632 640 marks.append(bookmark)
633 641 return sorted(marks)
634 642
635 643 def _branchtags(self, partial, lrev):
636 644 # TODO: rename this function?
637 645 tiprev = len(self) - 1
638 646 if lrev != tiprev:
639 647 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
640 648 self._updatebranchcache(partial, ctxgen)
641 649 self._writebranchcache(partial, self.changelog.tip(), tiprev)
642 650
643 651 return partial
644 652
645 653 @unfilteredmethod # Until we get a smarter cache management
646 654 def updatebranchcache(self):
647 655 tip = self.changelog.tip()
648 656 if self._branchcache is not None and self._branchcachetip == tip:
649 657 return
650 658
651 659 oldtip = self._branchcachetip
652 660 self._branchcachetip = tip
653 661 if oldtip is None or oldtip not in self.changelog.nodemap:
654 662 partial, last, lrev = self._readbranchcache()
655 663 else:
656 664 lrev = self.changelog.rev(oldtip)
657 665 partial = self._branchcache
658 666
659 667 self._branchtags(partial, lrev)
660 668 # this private cache holds all heads (not just the branch tips)
661 669 self._branchcache = partial
662 670
663 671 def branchmap(self):
664 672 '''returns a dictionary {branch: [branchheads]}'''
665 673 if self.changelog.filteredrevs:
666 674 # some changesets are excluded; we can't use the cache
667 675 branchmap = {}
668 676 self._updatebranchcache(branchmap, (self[r] for r in self))
669 677 return branchmap
670 678 else:
671 679 self.updatebranchcache()
672 680 return self._branchcache
673 681
674 682
675 683 def _branchtip(self, heads):
676 684 '''return the tipmost branch head in heads'''
677 685 tip = heads[-1]
678 686 for h in reversed(heads):
679 687 if not self[h].closesbranch():
680 688 tip = h
681 689 break
682 690 return tip
683 691
684 692 def branchtip(self, branch):
685 693 '''return the tip node for a given branch'''
686 694 if branch not in self.branchmap():
687 695 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
688 696 return self._branchtip(self.branchmap()[branch])
689 697
690 698 def branchtags(self):
691 699 '''return a dict where branch names map to the tipmost head of
692 700 the branch, open heads come before closed'''
693 701 bt = {}
694 702 for bn, heads in self.branchmap().iteritems():
695 703 bt[bn] = self._branchtip(heads)
696 704 return bt
697 705
698 706 @unfilteredmethod # Until we get a smarter cache management
699 707 def _readbranchcache(self):
700 708 partial = {}
701 709 try:
702 710 f = self.opener("cache/branchheads")
703 711 lines = f.read().split('\n')
704 712 f.close()
705 713 except (IOError, OSError):
706 714 return {}, nullid, nullrev
707 715
708 716 try:
709 717 last, lrev = lines.pop(0).split(" ", 1)
710 718 last, lrev = bin(last), int(lrev)
711 719 if lrev >= len(self) or self[lrev].node() != last:
712 720 # invalidate the cache
713 721 raise ValueError('invalidating branch cache (tip differs)')
714 722 for l in lines:
715 723 if not l:
716 724 continue
717 725 node, label = l.split(" ", 1)
718 726 label = encoding.tolocal(label.strip())
719 727 if not node in self:
720 728 raise ValueError('invalidating branch cache because node '+
721 729 '%s does not exist' % node)
722 730 partial.setdefault(label, []).append(bin(node))
723 731 except KeyboardInterrupt:
724 732 raise
725 733 except Exception, inst:
726 734 if self.ui.debugflag:
727 735 self.ui.warn(str(inst), '\n')
728 736 partial, last, lrev = {}, nullid, nullrev
729 737 return partial, last, lrev
730 738
731 739 @unfilteredmethod # Until we get a smarter cache management
732 740 def _writebranchcache(self, branches, tip, tiprev):
733 741 try:
734 742 f = self.opener("cache/branchheads", "w", atomictemp=True)
735 743 f.write("%s %s\n" % (hex(tip), tiprev))
736 744 for label, nodes in branches.iteritems():
737 745 for node in nodes:
738 746 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
739 747 f.close()
740 748 except (IOError, OSError):
741 749 pass
742 750
743 751 @unfilteredmethod # Until we get a smarter cache management
744 752 def _updatebranchcache(self, partial, ctxgen):
745 753 """Given a branchhead cache, partial, that may have extra nodes or be
746 754 missing heads, and a generator of nodes that are at least a superset of
747 755 the missing heads, this function updates partial to be correct.
748 756 """
749 757 # collect new branch entries
750 758 newbranches = {}
751 759 for c in ctxgen:
752 760 newbranches.setdefault(c.branch(), []).append(c.node())
753 761 # if older branchheads are reachable from new ones, they aren't
754 762 # really branchheads. Note checking parents is insufficient:
755 763 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
756 764 for branch, newnodes in newbranches.iteritems():
757 765 bheads = partial.setdefault(branch, [])
758 766 # Remove candidate heads that no longer are in the repo (e.g., as
759 767 # the result of a strip that just happened). Avoid using 'node in
760 768 # self' here because that dives down into branchcache code somewhat
761 769 # recursively.
762 770 bheadrevs = [self.changelog.rev(node) for node in bheads
763 771 if self.changelog.hasnode(node)]
764 772 newheadrevs = [self.changelog.rev(node) for node in newnodes
765 773 if self.changelog.hasnode(node)]
766 774 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
767 775 # Remove duplicates - nodes that are in newheadrevs and are already
768 776 # in bheadrevs. This can happen if you strip a node whose parent
769 777 # was already a head (because they're on different branches).
770 778 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
771 779
772 780 # Starting from tip means fewer passes over reachable. If we know
773 781 # the new candidates are not ancestors of existing heads, we don't
774 782 # have to examine ancestors of existing heads
775 783 if ctxisnew:
776 784 iterrevs = sorted(newheadrevs)
777 785 else:
778 786 iterrevs = list(bheadrevs)
779 787
780 788 # This loop prunes out two kinds of heads - heads that are
781 789 # superseded by a head in newheadrevs, and newheadrevs that are not
782 790 # heads because an existing head is their descendant.
783 791 while iterrevs:
784 792 latest = iterrevs.pop()
785 793 if latest not in bheadrevs:
786 794 continue
787 795 ancestors = set(self.changelog.ancestors([latest],
788 796 bheadrevs[0]))
789 797 if ancestors:
790 798 bheadrevs = [b for b in bheadrevs if b not in ancestors]
791 799 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
792 800
793 801 # There may be branches that cease to exist when the last commit in the
794 802 # branch was stripped. This code filters them out. Note that the
795 803 # branch that ceased to exist may not be in newbranches because
796 804 # newbranches is the set of candidate heads, which when you strip the
797 805 # last commit in a branch will be the parent branch.
798 806 for branch in partial.keys():
799 807 nodes = [head for head in partial[branch]
800 808 if self.changelog.hasnode(head)]
801 809 if not nodes:
802 810 del partial[branch]
803 811
804 812 def lookup(self, key):
805 813 return self[key].node()
806 814
807 815 def lookupbranch(self, key, remote=None):
808 816 repo = remote or self
809 817 if key in repo.branchmap():
810 818 return key
811 819
812 820 repo = (remote and remote.local()) and remote or self
813 821 return repo[key].branch()
814 822
815 823 def known(self, nodes):
816 824 nm = self.changelog.nodemap
817 825 pc = self._phasecache
818 826 result = []
819 827 for n in nodes:
820 828 r = nm.get(n)
821 829 resp = not (r is None or pc.phase(self, r) >= phases.secret)
822 830 result.append(resp)
823 831 return result
824 832
825 833 def local(self):
826 834 return self
827 835
828 836 def cancopy(self):
829 837 return self.local() # so statichttprepo's override of local() works
830 838
831 839 def join(self, f):
832 840 return os.path.join(self.path, f)
833 841
834 842 def wjoin(self, f):
835 843 return os.path.join(self.root, f)
836 844
837 845 def file(self, f):
838 846 if f[0] == '/':
839 847 f = f[1:]
840 848 return filelog.filelog(self.sopener, f)
841 849
842 850 def changectx(self, changeid):
843 851 return self[changeid]
844 852
845 853 def parents(self, changeid=None):
846 854 '''get list of changectxs for parents of changeid'''
847 855 return self[changeid].parents()
848 856
849 857 def setparents(self, p1, p2=nullid):
850 858 copies = self.dirstate.setparents(p1, p2)
851 859 if copies:
852 860 # Adjust copy records; the dirstate cannot do it, it
853 861 # requires access to the parents' manifests. Preserve them
854 862 # only for entries added to the first parent.
855 863 pctx = self[p1]
856 864 for f in copies:
857 865 if f not in pctx and copies[f] in pctx:
858 866 self.dirstate.copy(copies[f], f)
859 867
860 868 def filectx(self, path, changeid=None, fileid=None):
861 869 """changeid can be a changeset revision, node, or tag.
862 870 fileid can be a file revision or node."""
863 871 return context.filectx(self, path, changeid, fileid)
864 872
865 873 def getcwd(self):
866 874 return self.dirstate.getcwd()
867 875
868 876 def pathto(self, f, cwd=None):
869 877 return self.dirstate.pathto(f, cwd)
870 878
871 879 def wfile(self, f, mode='r'):
872 880 return self.wopener(f, mode)
873 881
874 882 def _link(self, f):
875 883 return os.path.islink(self.wjoin(f))
876 884
877 885 def _loadfilter(self, filter):
878 886 if filter not in self.filterpats:
879 887 l = []
880 888 for pat, cmd in self.ui.configitems(filter):
881 889 if cmd == '!':
882 890 continue
883 891 mf = matchmod.match(self.root, '', [pat])
884 892 fn = None
885 893 params = cmd
886 894 for name, filterfn in self._datafilters.iteritems():
887 895 if cmd.startswith(name):
888 896 fn = filterfn
889 897 params = cmd[len(name):].lstrip()
890 898 break
891 899 if not fn:
892 900 fn = lambda s, c, **kwargs: util.filter(s, c)
893 901 # Wrap old filters not supporting keyword arguments
894 902 if not inspect.getargspec(fn)[2]:
895 903 oldfn = fn
896 904 fn = lambda s, c, **kwargs: oldfn(s, c)
897 905 l.append((mf, fn, params))
898 906 self.filterpats[filter] = l
899 907 return self.filterpats[filter]
900 908
901 909 def _filter(self, filterpats, filename, data):
902 910 for mf, fn, cmd in filterpats:
903 911 if mf(filename):
904 912 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
905 913 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
906 914 break
907 915
908 916 return data
909 917
910 918 @unfilteredpropertycache
911 919 def _encodefilterpats(self):
912 920 return self._loadfilter('encode')
913 921
914 922 @unfilteredpropertycache
915 923 def _decodefilterpats(self):
916 924 return self._loadfilter('decode')
917 925
918 926 def adddatafilter(self, name, filter):
919 927 self._datafilters[name] = filter
920 928
921 929 def wread(self, filename):
922 930 if self._link(filename):
923 931 data = os.readlink(self.wjoin(filename))
924 932 else:
925 933 data = self.wopener.read(filename)
926 934 return self._filter(self._encodefilterpats, filename, data)
927 935
928 936 def wwrite(self, filename, data, flags):
929 937 data = self._filter(self._decodefilterpats, filename, data)
930 938 if 'l' in flags:
931 939 self.wopener.symlink(data, filename)
932 940 else:
933 941 self.wopener.write(filename, data)
934 942 if 'x' in flags:
935 943 util.setflags(self.wjoin(filename), False, True)
936 944
937 945 def wwritedata(self, filename, data):
938 946 return self._filter(self._decodefilterpats, filename, data)
939 947
940 948 def transaction(self, desc):
941 949 tr = self._transref and self._transref() or None
942 950 if tr and tr.running():
943 951 return tr.nest()
944 952
945 953 # abort here if the journal already exists
946 954 if os.path.exists(self.sjoin("journal")):
947 955 raise error.RepoError(
948 956 _("abandoned transaction found - run hg recover"))
949 957
950 958 self._writejournal(desc)
951 959 renames = [(x, undoname(x)) for x in self._journalfiles()]
952 960
953 961 tr = transaction.transaction(self.ui.warn, self.sopener,
954 962 self.sjoin("journal"),
955 963 aftertrans(renames),
956 964 self.store.createmode)
957 965 self._transref = weakref.ref(tr)
958 966 return tr
959 967
960 968 def _journalfiles(self):
961 969 return (self.sjoin('journal'), self.join('journal.dirstate'),
962 970 self.join('journal.branch'), self.join('journal.desc'),
963 971 self.join('journal.bookmarks'),
964 972 self.sjoin('journal.phaseroots'))
965 973
966 974 def undofiles(self):
967 975 return [undoname(x) for x in self._journalfiles()]
968 976
969 977 def _writejournal(self, desc):
970 978 self.opener.write("journal.dirstate",
971 979 self.opener.tryread("dirstate"))
972 980 self.opener.write("journal.branch",
973 981 encoding.fromlocal(self.dirstate.branch()))
974 982 self.opener.write("journal.desc",
975 983 "%d\n%s\n" % (len(self), desc))
976 984 self.opener.write("journal.bookmarks",
977 985 self.opener.tryread("bookmarks"))
978 986 self.sopener.write("journal.phaseroots",
979 987 self.sopener.tryread("phaseroots"))
980 988
981 989 def recover(self):
982 990 lock = self.lock()
983 991 try:
984 992 if os.path.exists(self.sjoin("journal")):
985 993 self.ui.status(_("rolling back interrupted transaction\n"))
986 994 transaction.rollback(self.sopener, self.sjoin("journal"),
987 995 self.ui.warn)
988 996 self.invalidate()
989 997 return True
990 998 else:
991 999 self.ui.warn(_("no interrupted transaction available\n"))
992 1000 return False
993 1001 finally:
994 1002 lock.release()
995 1003
996 1004 def rollback(self, dryrun=False, force=False):
997 1005 wlock = lock = None
998 1006 try:
999 1007 wlock = self.wlock()
1000 1008 lock = self.lock()
1001 1009 if os.path.exists(self.sjoin("undo")):
1002 1010 return self._rollback(dryrun, force)
1003 1011 else:
1004 1012 self.ui.warn(_("no rollback information available\n"))
1005 1013 return 1
1006 1014 finally:
1007 1015 release(lock, wlock)
1008 1016
1009 1017 @unfilteredmethod # Until we get smarter cache management
1010 1018 def _rollback(self, dryrun, force):
1011 1019 ui = self.ui
1012 1020 try:
1013 1021 args = self.opener.read('undo.desc').splitlines()
1014 1022 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1015 1023 if len(args) >= 3:
1016 1024 detail = args[2]
1017 1025 oldtip = oldlen - 1
1018 1026
1019 1027 if detail and ui.verbose:
1020 1028 msg = (_('repository tip rolled back to revision %s'
1021 1029 ' (undo %s: %s)\n')
1022 1030 % (oldtip, desc, detail))
1023 1031 else:
1024 1032 msg = (_('repository tip rolled back to revision %s'
1025 1033 ' (undo %s)\n')
1026 1034 % (oldtip, desc))
1027 1035 except IOError:
1028 1036 msg = _('rolling back unknown transaction\n')
1029 1037 desc = None
1030 1038
1031 1039 if not force and self['.'] != self['tip'] and desc == 'commit':
1032 1040 raise util.Abort(
1033 1041 _('rollback of last commit while not checked out '
1034 1042 'may lose data'), hint=_('use -f to force'))
1035 1043
1036 1044 ui.status(msg)
1037 1045 if dryrun:
1038 1046 return 0
1039 1047
1040 1048 parents = self.dirstate.parents()
1041 1049 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1042 1050 if os.path.exists(self.join('undo.bookmarks')):
1043 1051 util.rename(self.join('undo.bookmarks'),
1044 1052 self.join('bookmarks'))
1045 1053 if os.path.exists(self.sjoin('undo.phaseroots')):
1046 1054 util.rename(self.sjoin('undo.phaseroots'),
1047 1055 self.sjoin('phaseroots'))
1048 1056 self.invalidate()
1049 1057
1050 1058 # Discard all cache entries to force reloading everything.
1051 1059 self._filecache.clear()
1052 1060
1053 1061 parentgone = (parents[0] not in self.changelog.nodemap or
1054 1062 parents[1] not in self.changelog.nodemap)
1055 1063 if parentgone:
1056 1064 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1057 1065 try:
1058 1066 branch = self.opener.read('undo.branch')
1059 1067 self.dirstate.setbranch(encoding.tolocal(branch))
1060 1068 except IOError:
1061 1069 ui.warn(_('named branch could not be reset: '
1062 1070 'current branch is still \'%s\'\n')
1063 1071 % self.dirstate.branch())
1064 1072
1065 1073 self.dirstate.invalidate()
1066 1074 parents = tuple([p.rev() for p in self.parents()])
1067 1075 if len(parents) > 1:
1068 1076 ui.status(_('working directory now based on '
1069 1077 'revisions %d and %d\n') % parents)
1070 1078 else:
1071 1079 ui.status(_('working directory now based on '
1072 1080 'revision %d\n') % parents)
1073 1081 # TODO: if we know which new heads may result from this rollback, pass
1074 1082 # them to destroy(), which will prevent the branchhead cache from being
1075 1083 # invalidated.
1076 1084 self.destroyed()
1077 1085 return 0
1078 1086
1079 1087 def invalidatecaches(self):
1080 1088
1081 1089 if '_tagscache' in vars(self):
1082 1090 # can't use delattr on proxy
1083 1091 del self.__dict__['_tagscache']
1084 1092
1085 1093 self.unfiltered()._branchcache = None # in UTF-8
1086 1094 self.unfiltered()._branchcachetip = None
1087 1095 obsolete.clearobscaches(self)
1088 1096
1089 1097 def invalidatedirstate(self):
1090 1098 '''Invalidates the dirstate, causing the next call to dirstate
1091 1099 to check if it was modified since the last time it was read,
1092 1100 rereading it if it has.
1093 1101
1094 1102 This is different from dirstate.invalidate() in that it doesn't always
1095 1103 reread the dirstate. Use dirstate.invalidate() if you want to
1096 1104 explicitly read the dirstate again (i.e. restoring it to a previous
1097 1105 known good state).'''
1098 1106 if hasunfilteredcache(self, 'dirstate'):
1099 1107 for k in self.dirstate._filecache:
1100 1108 try:
1101 1109 delattr(self.dirstate, k)
1102 1110 except AttributeError:
1103 1111 pass
1104 1112 delattr(self.unfiltered(), 'dirstate')
1105 1113
1106 1114 def invalidate(self):
1107 1115 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1108 1116 for k in self._filecache:
1109 1117 # dirstate is invalidated separately in invalidatedirstate()
1110 1118 if k == 'dirstate':
1111 1119 continue
1112 1120
1113 1121 try:
1114 1122 delattr(unfiltered, k)
1115 1123 except AttributeError:
1116 1124 pass
1117 1125 self.invalidatecaches()
1118 1126
1119 1127 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1120 1128 try:
1121 1129 l = lock.lock(lockname, 0, releasefn, desc=desc)
1122 1130 except error.LockHeld, inst:
1123 1131 if not wait:
1124 1132 raise
1125 1133 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1126 1134 (desc, inst.locker))
1127 1135 # default to 600 seconds timeout
1128 1136 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1129 1137 releasefn, desc=desc)
1130 1138 if acquirefn:
1131 1139 acquirefn()
1132 1140 return l
1133 1141
1134 1142 def _afterlock(self, callback):
1135 1143 """add a callback to the current repository lock.
1136 1144
1137 1145 The callback will be executed on lock release."""
1138 1146 l = self._lockref and self._lockref()
1139 1147 if l:
1140 1148 l.postrelease.append(callback)
1141 1149 else:
1142 1150 callback()
1143 1151
1144 1152 def lock(self, wait=True):
1145 1153 '''Lock the repository store (.hg/store) and return a weak reference
1146 1154 to the lock. Use this before modifying the store (e.g. committing or
1147 1155 stripping). If you are opening a transaction, get a lock as well.'''
1148 1156 l = self._lockref and self._lockref()
1149 1157 if l is not None and l.held:
1150 1158 l.lock()
1151 1159 return l
1152 1160
1153 1161 def unlock():
1154 1162 self.store.write()
1155 1163 if hasunfilteredcache(self, '_phasecache'):
1156 1164 self._phasecache.write()
1157 1165 for k, ce in self._filecache.items():
1158 1166 if k == 'dirstate':
1159 1167 continue
1160 1168 ce.refresh()
1161 1169
1162 1170 l = self._lock(self.sjoin("lock"), wait, unlock,
1163 1171 self.invalidate, _('repository %s') % self.origroot)
1164 1172 self._lockref = weakref.ref(l)
1165 1173 return l
1166 1174
1167 1175 def wlock(self, wait=True):
1168 1176 '''Lock the non-store parts of the repository (everything under
1169 1177 .hg except .hg/store) and return a weak reference to the lock.
1170 1178 Use this before modifying files in .hg.'''
1171 1179 l = self._wlockref and self._wlockref()
1172 1180 if l is not None and l.held:
1173 1181 l.lock()
1174 1182 return l
1175 1183
1176 1184 def unlock():
1177 1185 self.dirstate.write()
1178 1186 ce = self._filecache.get('dirstate')
1179 1187 if ce:
1180 1188 ce.refresh()
1181 1189
1182 1190 l = self._lock(self.join("wlock"), wait, unlock,
1183 1191 self.invalidatedirstate, _('working directory of %s') %
1184 1192 self.origroot)
1185 1193 self._wlockref = weakref.ref(l)
1186 1194 return l
1187 1195
1188 1196 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1189 1197 """
1190 1198 commit an individual file as part of a larger transaction
1191 1199 """
1192 1200
1193 1201 fname = fctx.path()
1194 1202 text = fctx.data()
1195 1203 flog = self.file(fname)
1196 1204 fparent1 = manifest1.get(fname, nullid)
1197 1205 fparent2 = fparent2o = manifest2.get(fname, nullid)
1198 1206
1199 1207 meta = {}
1200 1208 copy = fctx.renamed()
1201 1209 if copy and copy[0] != fname:
1202 1210 # Mark the new revision of this file as a copy of another
1203 1211 # file. This copy data will effectively act as a parent
1204 1212 # of this new revision. If this is a merge, the first
1205 1213 # parent will be the nullid (meaning "look up the copy data")
1206 1214 # and the second one will be the other parent. For example:
1207 1215 #
1208 1216 # 0 --- 1 --- 3 rev1 changes file foo
1209 1217 # \ / rev2 renames foo to bar and changes it
1210 1218 # \- 2 -/ rev3 should have bar with all changes and
1211 1219 # should record that bar descends from
1212 1220 # bar in rev2 and foo in rev1
1213 1221 #
1214 1222 # this allows this merge to succeed:
1215 1223 #
1216 1224 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1217 1225 # \ / merging rev3 and rev4 should use bar@rev2
1218 1226 # \- 2 --- 4 as the merge base
1219 1227 #
1220 1228
1221 1229 cfname = copy[0]
1222 1230 crev = manifest1.get(cfname)
1223 1231 newfparent = fparent2
1224 1232
1225 1233 if manifest2: # branch merge
1226 1234 if fparent2 == nullid or crev is None: # copied on remote side
1227 1235 if cfname in manifest2:
1228 1236 crev = manifest2[cfname]
1229 1237 newfparent = fparent1
1230 1238
1231 1239 # find source in nearest ancestor if we've lost track
1232 1240 if not crev:
1233 1241 self.ui.debug(" %s: searching for copy revision for %s\n" %
1234 1242 (fname, cfname))
1235 1243 for ancestor in self[None].ancestors():
1236 1244 if cfname in ancestor:
1237 1245 crev = ancestor[cfname].filenode()
1238 1246 break
1239 1247
1240 1248 if crev:
1241 1249 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1242 1250 meta["copy"] = cfname
1243 1251 meta["copyrev"] = hex(crev)
1244 1252 fparent1, fparent2 = nullid, newfparent
1245 1253 else:
1246 1254 self.ui.warn(_("warning: can't find ancestor for '%s' "
1247 1255 "copied from '%s'!\n") % (fname, cfname))
1248 1256
1249 1257 elif fparent2 != nullid:
1250 1258 # is one parent an ancestor of the other?
1251 1259 fparentancestor = flog.ancestor(fparent1, fparent2)
1252 1260 if fparentancestor == fparent1:
1253 1261 fparent1, fparent2 = fparent2, nullid
1254 1262 elif fparentancestor == fparent2:
1255 1263 fparent2 = nullid
1256 1264
1257 1265 # is the file changed?
1258 1266 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1259 1267 changelist.append(fname)
1260 1268 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1261 1269
1262 1270 # are just the flags changed during merge?
1263 1271 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1264 1272 changelist.append(fname)
1265 1273
1266 1274 return fparent1
1267 1275
1268 1276 @unfilteredmethod
1269 1277 def commit(self, text="", user=None, date=None, match=None, force=False,
1270 1278 editor=False, extra={}):
1271 1279 """Add a new revision to current repository.
1272 1280
1273 1281 Revision information is gathered from the working directory,
1274 1282 match can be used to filter the committed files. If editor is
1275 1283 supplied, it is called to get a commit message.
1276 1284 """
1277 1285
1278 1286 def fail(f, msg):
1279 1287 raise util.Abort('%s: %s' % (f, msg))
1280 1288
1281 1289 if not match:
1282 1290 match = matchmod.always(self.root, '')
1283 1291
1284 1292 if not force:
1285 1293 vdirs = []
1286 1294 match.dir = vdirs.append
1287 1295 match.bad = fail
1288 1296
1289 1297 wlock = self.wlock()
1290 1298 try:
1291 1299 wctx = self[None]
1292 1300 merge = len(wctx.parents()) > 1
1293 1301
1294 1302 if (not force and merge and match and
1295 1303 (match.files() or match.anypats())):
1296 1304 raise util.Abort(_('cannot partially commit a merge '
1297 1305 '(do not specify files or patterns)'))
1298 1306
1299 1307 changes = self.status(match=match, clean=force)
1300 1308 if force:
1301 1309 changes[0].extend(changes[6]) # mq may commit unchanged files
1302 1310
1303 1311 # check subrepos
1304 1312 subs = []
1305 1313 commitsubs = set()
1306 1314 newstate = wctx.substate.copy()
1307 1315 # only manage subrepos and .hgsubstate if .hgsub is present
1308 1316 if '.hgsub' in wctx:
1309 1317 # we'll decide whether to track this ourselves, thanks
1310 1318 if '.hgsubstate' in changes[0]:
1311 1319 changes[0].remove('.hgsubstate')
1312 1320 if '.hgsubstate' in changes[2]:
1313 1321 changes[2].remove('.hgsubstate')
1314 1322
1315 1323 # compare current state to last committed state
1316 1324 # build new substate based on last committed state
1317 1325 oldstate = wctx.p1().substate
1318 1326 for s in sorted(newstate.keys()):
1319 1327 if not match(s):
1320 1328 # ignore working copy, use old state if present
1321 1329 if s in oldstate:
1322 1330 newstate[s] = oldstate[s]
1323 1331 continue
1324 1332 if not force:
1325 1333 raise util.Abort(
1326 1334 _("commit with new subrepo %s excluded") % s)
1327 1335 if wctx.sub(s).dirty(True):
1328 1336 if not self.ui.configbool('ui', 'commitsubrepos'):
1329 1337 raise util.Abort(
1330 1338 _("uncommitted changes in subrepo %s") % s,
1331 1339 hint=_("use --subrepos for recursive commit"))
1332 1340 subs.append(s)
1333 1341 commitsubs.add(s)
1334 1342 else:
1335 1343 bs = wctx.sub(s).basestate()
1336 1344 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1337 1345 if oldstate.get(s, (None, None, None))[1] != bs:
1338 1346 subs.append(s)
1339 1347
1340 1348 # check for removed subrepos
1341 1349 for p in wctx.parents():
1342 1350 r = [s for s in p.substate if s not in newstate]
1343 1351 subs += [s for s in r if match(s)]
1344 1352 if subs:
1345 1353 if (not match('.hgsub') and
1346 1354 '.hgsub' in (wctx.modified() + wctx.added())):
1347 1355 raise util.Abort(
1348 1356 _("can't commit subrepos without .hgsub"))
1349 1357 changes[0].insert(0, '.hgsubstate')
1350 1358
1351 1359 elif '.hgsub' in changes[2]:
1352 1360 # clean up .hgsubstate when .hgsub is removed
1353 1361 if ('.hgsubstate' in wctx and
1354 1362 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1355 1363 changes[2].insert(0, '.hgsubstate')
1356 1364
1357 1365 # make sure all explicit patterns are matched
1358 1366 if not force and match.files():
1359 1367 matched = set(changes[0] + changes[1] + changes[2])
1360 1368
1361 1369 for f in match.files():
1362 1370 f = self.dirstate.normalize(f)
1363 1371 if f == '.' or f in matched or f in wctx.substate:
1364 1372 continue
1365 1373 if f in changes[3]: # missing
1366 1374 fail(f, _('file not found!'))
1367 1375 if f in vdirs: # visited directory
1368 1376 d = f + '/'
1369 1377 for mf in matched:
1370 1378 if mf.startswith(d):
1371 1379 break
1372 1380 else:
1373 1381 fail(f, _("no match under directory!"))
1374 1382 elif f not in self.dirstate:
1375 1383 fail(f, _("file not tracked!"))
1376 1384
1377 1385 if (not force and not extra.get("close") and not merge
1378 1386 and not (changes[0] or changes[1] or changes[2])
1379 1387 and wctx.branch() == wctx.p1().branch()):
1380 1388 return None
1381 1389
1382 1390 if merge and changes[3]:
1383 1391 raise util.Abort(_("cannot commit merge with missing files"))
1384 1392
1385 1393 ms = mergemod.mergestate(self)
1386 1394 for f in changes[0]:
1387 1395 if f in ms and ms[f] == 'u':
1388 1396 raise util.Abort(_("unresolved merge conflicts "
1389 1397 "(see hg help resolve)"))
1390 1398
1391 1399 cctx = context.workingctx(self, text, user, date, extra, changes)
1392 1400 if editor:
1393 1401 cctx._text = editor(self, cctx, subs)
1394 1402 edited = (text != cctx._text)
1395 1403
1396 1404 # commit subs and write new state
1397 1405 if subs:
1398 1406 for s in sorted(commitsubs):
1399 1407 sub = wctx.sub(s)
1400 1408 self.ui.status(_('committing subrepository %s\n') %
1401 1409 subrepo.subrelpath(sub))
1402 1410 sr = sub.commit(cctx._text, user, date)
1403 1411 newstate[s] = (newstate[s][0], sr)
1404 1412 subrepo.writestate(self, newstate)
1405 1413
1406 1414 # Save commit message in case this transaction gets rolled back
1407 1415 # (e.g. by a pretxncommit hook). Leave the content alone on
1408 1416 # the assumption that the user will use the same editor again.
1409 1417 msgfn = self.savecommitmessage(cctx._text)
1410 1418
1411 1419 p1, p2 = self.dirstate.parents()
1412 1420 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1413 1421 try:
1414 1422 self.hook("precommit", throw=True, parent1=hookp1,
1415 1423 parent2=hookp2)
1416 1424 ret = self.commitctx(cctx, True)
1417 1425 except: # re-raises
1418 1426 if edited:
1419 1427 self.ui.write(
1420 1428 _('note: commit message saved in %s\n') % msgfn)
1421 1429 raise
1422 1430
1423 1431 # update bookmarks, dirstate and mergestate
1424 1432 bookmarks.update(self, [p1, p2], ret)
1425 1433 for f in changes[0] + changes[1]:
1426 1434 self.dirstate.normal(f)
1427 1435 for f in changes[2]:
1428 1436 self.dirstate.drop(f)
1429 1437 self.dirstate.setparents(ret)
1430 1438 ms.reset()
1431 1439 finally:
1432 1440 wlock.release()
1433 1441
1434 1442 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1435 1443 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1436 1444 self._afterlock(commithook)
1437 1445 return ret
1438 1446
1439 1447 @unfilteredmethod
1440 1448 def commitctx(self, ctx, error=False):
1441 1449 """Add a new revision to current repository.
1442 1450 Revision information is passed via the context argument.
1443 1451 """
1444 1452
1445 1453 tr = lock = None
1446 1454 removed = list(ctx.removed())
1447 1455 p1, p2 = ctx.p1(), ctx.p2()
1448 1456 user = ctx.user()
1449 1457
1450 1458 lock = self.lock()
1451 1459 try:
1452 1460 tr = self.transaction("commit")
1453 1461 trp = weakref.proxy(tr)
1454 1462
1455 1463 if ctx.files():
1456 1464 m1 = p1.manifest().copy()
1457 1465 m2 = p2.manifest()
1458 1466
1459 1467 # check in files
1460 1468 new = {}
1461 1469 changed = []
1462 1470 linkrev = len(self)
1463 1471 for f in sorted(ctx.modified() + ctx.added()):
1464 1472 self.ui.note(f + "\n")
1465 1473 try:
1466 1474 fctx = ctx[f]
1467 1475 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1468 1476 changed)
1469 1477 m1.set(f, fctx.flags())
1470 1478 except OSError, inst:
1471 1479 self.ui.warn(_("trouble committing %s!\n") % f)
1472 1480 raise
1473 1481 except IOError, inst:
1474 1482 errcode = getattr(inst, 'errno', errno.ENOENT)
1475 1483 if error or errcode and errcode != errno.ENOENT:
1476 1484 self.ui.warn(_("trouble committing %s!\n") % f)
1477 1485 raise
1478 1486 else:
1479 1487 removed.append(f)
1480 1488
1481 1489 # update manifest
1482 1490 m1.update(new)
1483 1491 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1484 1492 drop = [f for f in removed if f in m1]
1485 1493 for f in drop:
1486 1494 del m1[f]
1487 1495 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1488 1496 p2.manifestnode(), (new, drop))
1489 1497 files = changed + removed
1490 1498 else:
1491 1499 mn = p1.manifestnode()
1492 1500 files = []
1493 1501
1494 1502 # update changelog
1495 1503 self.changelog.delayupdate()
1496 1504 n = self.changelog.add(mn, files, ctx.description(),
1497 1505 trp, p1.node(), p2.node(),
1498 1506 user, ctx.date(), ctx.extra().copy())
1499 1507 p = lambda: self.changelog.writepending() and self.root or ""
1500 1508 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1501 1509 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1502 1510 parent2=xp2, pending=p)
1503 1511 self.changelog.finalize(trp)
1504 1512 # set the new commit in its proper phase
1505 1513 targetphase = phases.newcommitphase(self.ui)
1506 1514 if targetphase:
1507 1515 # retracting the boundary does not alter parent changesets.
1508 1516 # if a parent has a higher phase, the resulting phase will
1509 1517 # be compliant anyway
1510 1518 #
1511 1519 # if minimal phase was 0 we don't need to retract anything
1512 1520 phases.retractboundary(self, targetphase, [n])
1513 1521 tr.close()
1514 1522 self.updatebranchcache()
1515 1523 return n
1516 1524 finally:
1517 1525 if tr:
1518 1526 tr.release()
1519 1527 lock.release()
1520 1528
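# --- illustrative sketch, not part of localrepo.py ---
# commitctx() is the low-level entry point and also accepts in-memory
# contexts. A hedged sketch, assuming the context.memctx/memfilectx
# signatures of this era and an already-open repo object:
from mercurial import context
from mercurial.node import nullid

def _filectxfn(repo, memctx, path):
    # return content for 'path'; raising IOError would mark the file removed
    return context.memfilectx(path, 'hello\n', islink=False, isexec=False)

def commithello(repo):
    parents = (repo['.'].node(), nullid)
    ctx = context.memctx(repo, parents, 'add hello', ['hello.txt'],
                         _filectxfn, user='sketch <sketch@example.com>')
    return repo.commitctx(ctx)  # returns the node of the new changeset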
1521 1529 @unfilteredmethod
1522 1530 def destroyed(self, newheadnodes=None):
1523 1531 '''Inform the repository that nodes have been destroyed.
1524 1532 Intended for use by strip and rollback, so there's a common
1525 1533 place for anything that has to be done after destroying history.
1526 1534
1527 1535 If you know the branchheads cache was up to date before nodes were removed
1528 1536 and you also know the set of candidate new heads that may have resulted
1529 1537 from the destruction, you can set newheadnodes. This will enable the
1530 1538 code to update the branchheads cache, rather than having future code
1531 1539 decide it's invalid and regenerate it from scratch.
1532 1540 '''
1533 1541 # If we have info, newheadnodes, on how to update the branch cache, do
1534 1542 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1535 1543 # will be caught the next time it is read.
1536 1544 if newheadnodes:
1537 1545 tiprev = len(self) - 1
1538 1546 ctxgen = (self[node] for node in newheadnodes
1539 1547 if self.changelog.hasnode(node))
1540 1548 self._updatebranchcache(self._branchcache, ctxgen)
1541 1549 self._writebranchcache(self._branchcache, self.changelog.tip(),
1542 1550 tiprev)
1543 1551
1544 1552 # Ensure the persistent tag cache is updated. Doing it now
1545 1553 # means that the tag cache only has to worry about destroyed
1546 1554 # heads immediately after a strip/rollback. That in turn
1547 1555 # guarantees that "cachetip == currenttip" (comparing both rev
1548 1556 # and node) always means no nodes have been added or destroyed.
1549 1557
1550 1558 # XXX this is suboptimal when qrefresh'ing: we strip the current
1551 1559 # head, refresh the tag cache, then immediately add a new head.
1552 1560 # But I think doing it this way is necessary for the "instant
1553 1561 # tag cache retrieval" case to work.
1554 1562 self.invalidatecaches()
1555 1563
1556 1564 # Discard all cache entries to force reloading everything.
1557 1565 self._filecache.clear()
1558 1566
1559 1567 def walk(self, match, node=None):
1560 1568 '''
1561 1569 walk recursively through the directory tree or a given
1562 1570 changeset, finding all files matched by the match
1563 1571 function
1564 1572 '''
1565 1573 return self[node].walk(match)
1566 1574
1567 1575 def status(self, node1='.', node2=None, match=None,
1568 1576 ignored=False, clean=False, unknown=False,
1569 1577 listsubrepos=False):
1570 1578 """return status of files between two nodes or node and working
1571 1579 directory.
1572 1580
1573 1581 If node1 is None, use the first dirstate parent instead.
1574 1582 If node2 is None, compare node1 with working directory.
1575 1583 """
1576 1584
1577 1585 def mfmatches(ctx):
1578 1586 mf = ctx.manifest().copy()
1579 1587 if match.always():
1580 1588 return mf
1581 1589 for fn in mf.keys():
1582 1590 if not match(fn):
1583 1591 del mf[fn]
1584 1592 return mf
1585 1593
1586 1594 if isinstance(node1, context.changectx):
1587 1595 ctx1 = node1
1588 1596 else:
1589 1597 ctx1 = self[node1]
1590 1598 if isinstance(node2, context.changectx):
1591 1599 ctx2 = node2
1592 1600 else:
1593 1601 ctx2 = self[node2]
1594 1602
1595 1603 working = ctx2.rev() is None
1596 1604 parentworking = working and ctx1 == self['.']
1597 1605 match = match or matchmod.always(self.root, self.getcwd())
1598 1606 listignored, listclean, listunknown = ignored, clean, unknown
1599 1607
1600 1608 # load earliest manifest first for caching reasons
1601 1609 if not working and ctx2.rev() < ctx1.rev():
1602 1610 ctx2.manifest()
1603 1611
1604 1612 if not parentworking:
1605 1613 def bad(f, msg):
1606 1614 # 'f' may be a directory pattern from 'match.files()',
1607 1615 # so 'f not in ctx1' is not enough
1608 1616 if f not in ctx1 and f not in ctx1.dirs():
1609 1617 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1610 1618 match.bad = bad
1611 1619
1612 1620 if working: # we need to scan the working dir
1613 1621 subrepos = []
1614 1622 if '.hgsub' in self.dirstate:
1615 1623 subrepos = ctx2.substate.keys()
1616 1624 s = self.dirstate.status(match, subrepos, listignored,
1617 1625 listclean, listunknown)
1618 1626 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1619 1627
1620 1628 # check for any possibly clean files
1621 1629 if parentworking and cmp:
1622 1630 fixup = []
1623 1631 # do a full compare of any files that might have changed
1624 1632 for f in sorted(cmp):
1625 1633 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1626 1634 or ctx1[f].cmp(ctx2[f])):
1627 1635 modified.append(f)
1628 1636 else:
1629 1637 fixup.append(f)
1630 1638
1631 1639 # update dirstate for files that are actually clean
1632 1640 if fixup:
1633 1641 if listclean:
1634 1642 clean += fixup
1635 1643
1636 1644 try:
1637 1645 # updating the dirstate is optional
1638 1646 # so we don't wait on the lock
1639 1647 wlock = self.wlock(False)
1640 1648 try:
1641 1649 for f in fixup:
1642 1650 self.dirstate.normal(f)
1643 1651 finally:
1644 1652 wlock.release()
1645 1653 except error.LockError:
1646 1654 pass
1647 1655
1648 1656 if not parentworking:
1649 1657 mf1 = mfmatches(ctx1)
1650 1658 if working:
1651 1659 # we are comparing working dir against non-parent
1652 1660 # generate a pseudo-manifest for the working dir
1653 1661 mf2 = mfmatches(self['.'])
1654 1662 for f in cmp + modified + added:
1655 1663 mf2[f] = None
1656 1664 mf2.set(f, ctx2.flags(f))
1657 1665 for f in removed:
1658 1666 if f in mf2:
1659 1667 del mf2[f]
1660 1668 else:
1661 1669 # we are comparing two revisions
1662 1670 deleted, unknown, ignored = [], [], []
1663 1671 mf2 = mfmatches(ctx2)
1664 1672
1665 1673 modified, added, clean = [], [], []
1666 1674 withflags = mf1.withflags() | mf2.withflags()
1667 1675 for fn in mf2:
1668 1676 if fn in mf1:
1669 1677 if (fn not in deleted and
1670 1678 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1671 1679 (mf1[fn] != mf2[fn] and
1672 1680 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1673 1681 modified.append(fn)
1674 1682 elif listclean:
1675 1683 clean.append(fn)
1676 1684 del mf1[fn]
1677 1685 elif fn not in deleted:
1678 1686 added.append(fn)
1679 1687 removed = mf1.keys()
1680 1688
1681 1689 if working and modified and not self.dirstate._checklink:
1682 1690 # Symlink placeholders may get non-symlink-like contents
1683 1691 # via user error or dereferencing by NFS or Samba servers,
1684 1692 # so we filter out any placeholders that don't look like a
1685 1693 # symlink
1686 1694 sane = []
1687 1695 for f in modified:
1688 1696 if ctx2.flags(f) == 'l':
1689 1697 d = ctx2[f].data()
1690 1698 if len(d) >= 1024 or '\n' in d or util.binary(d):
1691 1699 self.ui.debug('ignoring suspect symlink placeholder'
1692 1700 ' "%s"\n' % f)
1693 1701 continue
1694 1702 sane.append(f)
1695 1703 modified = sane
1696 1704
1697 1705 r = modified, added, removed, deleted, unknown, ignored, clean
1698 1706
1699 1707 if listsubrepos:
1700 1708 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1701 1709 if working:
1702 1710 rev2 = None
1703 1711 else:
1704 1712 rev2 = ctx2.substate[subpath][1]
1705 1713 try:
1706 1714 submatch = matchmod.narrowmatcher(subpath, match)
1707 1715 s = sub.status(rev2, match=submatch, ignored=listignored,
1708 1716 clean=listclean, unknown=listunknown,
1709 1717 listsubrepos=True)
1710 1718 for rfiles, sfiles in zip(r, s):
1711 1719 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1712 1720 except error.LookupError:
1713 1721 self.ui.status(_("skipping missing subrepository: %s\n")
1714 1722 % subpath)
1715 1723
1716 1724 for l in r:
1717 1725 l.sort()
1718 1726 return r
1719 1727
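# --- illustrative sketch, not part of localrepo.py ---
# A hedged usage example for status() above (the repo object is assumed to be
# already open): it returns seven sorted lists in the order built into 'r'.
modified, added, removed, deleted, unknown, ignored, clean = repo.status(
    node1='.', node2=None, ignored=True, clean=True, unknown=True)
for f in modified:
    print 'M %s' % f  # files changed in the working directory vs. its parent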
1720 1728 def heads(self, start=None):
1721 1729 heads = self.changelog.heads(start)
1722 1730 # sort the output in rev descending order
1723 1731 return sorted(heads, key=self.changelog.rev, reverse=True)
1724 1732
1725 1733 def branchheads(self, branch=None, start=None, closed=False):
1726 1734 '''return a (possibly filtered) list of heads for the given branch
1727 1735
1728 1736 Heads are returned in topological order, from newest to oldest.
1729 1737 If branch is None, use the dirstate branch.
1730 1738 If start is not None, return only heads reachable from start.
1731 1739 If closed is True, return heads that are marked as closed as well.
1732 1740 '''
1733 1741 if branch is None:
1734 1742 branch = self[None].branch()
1735 1743 branches = self.branchmap()
1736 1744 if branch not in branches:
1737 1745 return []
1738 1746 # the cache returns heads ordered lowest to highest
1739 1747 bheads = list(reversed(branches[branch]))
1740 1748 if start is not None:
1741 1749 # filter out the heads that cannot be reached from startrev
1742 1750 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1743 1751 bheads = [h for h in bheads if h in fbheads]
1744 1752 if not closed:
1745 1753 bheads = [h for h in bheads if not self[h].closesbranch()]
1746 1754 return bheads
1747 1755
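# --- illustrative sketch, not part of localrepo.py ---
# A hedged usage example for branchheads() above: it returns binary nodes,
# newest first, which can be turned into changectx objects for inspection.
for node in repo.branchheads('default', closed=False):
    ctx = repo[node]
    print ctx.rev(), ctx.branch(), ctx.description().splitlines()[0]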
1748 1756 def branches(self, nodes):
1749 1757 if not nodes:
1750 1758 nodes = [self.changelog.tip()]
1751 1759 b = []
1752 1760 for n in nodes:
1753 1761 t = n
1754 1762 while True:
1755 1763 p = self.changelog.parents(n)
1756 1764 if p[1] != nullid or p[0] == nullid:
1757 1765 b.append((t, n, p[0], p[1]))
1758 1766 break
1759 1767 n = p[0]
1760 1768 return b
1761 1769
1762 1770 def between(self, pairs):
1763 1771 r = []
1764 1772
1765 1773 for top, bottom in pairs:
1766 1774 n, l, i = top, [], 0
1767 1775 f = 1
1768 1776
1769 1777 while n != bottom and n != nullid:
1770 1778 p = self.changelog.parents(n)[0]
1771 1779 if i == f:
1772 1780 l.append(n)
1773 1781 f = f * 2
1774 1782 n = p
1775 1783 i += 1
1776 1784
1777 1785 r.append(l)
1778 1786
1779 1787 return r
1780 1788
1781 1789 def pull(self, remote, heads=None, force=False):
1782 1790 # don't open a transaction for nothing or you'll break a future useful
1783 1791 # rollback call
1784 1792 tr = None
1785 1793 trname = 'pull\n' + util.hidepassword(remote.url())
1786 1794 lock = self.lock()
1787 1795 try:
1788 1796 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1789 1797 force=force)
1790 1798 common, fetch, rheads = tmp
1791 1799 if not fetch:
1792 1800 self.ui.status(_("no changes found\n"))
1793 1801 added = []
1794 1802 result = 0
1795 1803 else:
1796 1804 tr = self.transaction(trname)
1797 1805 if heads is None and list(common) == [nullid]:
1798 1806 self.ui.status(_("requesting all changes\n"))
1799 1807 elif heads is None and remote.capable('changegroupsubset'):
1800 1808 # issue1320, avoid a race if remote changed after discovery
1801 1809 heads = rheads
1802 1810
1803 1811 if remote.capable('getbundle'):
1804 1812 cg = remote.getbundle('pull', common=common,
1805 1813 heads=heads or rheads)
1806 1814 elif heads is None:
1807 1815 cg = remote.changegroup(fetch, 'pull')
1808 1816 elif not remote.capable('changegroupsubset'):
1809 1817 raise util.Abort(_("partial pull cannot be done because "
1810 1818 "other repository doesn't support "
1811 1819 "changegroupsubset."))
1812 1820 else:
1813 1821 cg = remote.changegroupsubset(fetch, heads, 'pull')
1814 1822 clstart = len(self.changelog)
1815 1823 result = self.addchangegroup(cg, 'pull', remote.url())
1816 1824 clend = len(self.changelog)
1817 1825 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1818 1826
1819 1827 # compute target subset
1820 1828 if heads is None:
1821 1829 # We pulled everything possible
1822 1830 # sync on everything common
1823 1831 subset = common + added
1824 1832 else:
1825 1833 # We pulled a specific subset
1826 1834 # sync on this subset
1827 1835 subset = heads
1828 1836
1829 1837 # Get remote phases data from remote
1830 1838 remotephases = remote.listkeys('phases')
1831 1839 publishing = bool(remotephases.get('publishing', False))
1832 1840 if remotephases and not publishing:
1833 1841 # remote is new and non-publishing
1834 1842 pheads, _dr = phases.analyzeremotephases(self, subset,
1835 1843 remotephases)
1836 1844 phases.advanceboundary(self, phases.public, pheads)
1837 1845 phases.advanceboundary(self, phases.draft, subset)
1838 1846 else:
1839 1847 # Remote is old or publishing: all common changesets
1840 1848 # should be seen as public
1841 1849 phases.advanceboundary(self, phases.public, subset)
1842 1850
1843 1851 if obsolete._enabled:
1844 1852 self.ui.debug('fetching remote obsolete markers\n')
1845 1853 remoteobs = remote.listkeys('obsolete')
1846 1854 if 'dump0' in remoteobs:
1847 1855 if tr is None:
1848 1856 tr = self.transaction(trname)
1849 1857 for key in sorted(remoteobs, reverse=True):
1850 1858 if key.startswith('dump'):
1851 1859 data = base85.b85decode(remoteobs[key])
1852 1860 self.obsstore.mergemarkers(tr, data)
1853 1861 if tr is not None:
1854 1862 tr.close()
1855 1863 finally:
1856 1864 if tr is not None:
1857 1865 tr.release()
1858 1866 lock.release()
1859 1867
1860 1868 return result
1861 1869
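# --- illustrative sketch, not part of localrepo.py ---
# A hedged usage example for pull() above (URL is hypothetical): it expects a
# peer object rather than a plain path, which hg.peer() constructs.
from mercurial import hg

other = hg.peer(repo, {}, 'http://example.com/hgrepo')
ret = repo.pull(other, heads=None, force=False)
# ret is 0 when nothing was fetched, otherwise addchangegroup()'s return value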
1862 1870 def checkpush(self, force, revs):
1863 1871 """Extensions can override this function if additional checks have
1864 1872 to be performed before pushing, or call it if they override push
1865 1873 command.
1866 1874 """
1867 1875 pass
1868 1876
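# --- illustrative sketch, not part of localrepo.py ---
# A hedged example of the extension hook point described above: an extension's
# reposetup() may subclass the repository and veto pushes (the extension name
# and config knob are hypothetical).
from mercurial import util

def reposetup(ui, repo):
    if not repo.local():
        return
    class vetorepo(repo.__class__):
        def checkpush(self, force, revs):
            super(vetorepo, self).checkpush(force, revs)
            if not force and ui.configbool('myext', 'blockpush'):
                raise util.Abort('pushing is disabled by myext')
    repo.__class__ = vetorepo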
1869 1877 def push(self, remote, force=False, revs=None, newbranch=False):
1870 1878 '''Push outgoing changesets (limited by revs) from the current
1871 1879 repository to remote. Return an integer:
1872 1880 - None means nothing to push
1873 1881 - 0 means HTTP error
1874 1882 - 1 means we pushed and remote head count is unchanged *or*
1875 1883 we have outgoing changesets but refused to push
1876 1884 - other values as described by addchangegroup()
1877 1885 '''
1878 1886 # there are two ways to push to remote repo:
1879 1887 #
1880 1888 # addchangegroup assumes local user can lock remote
1881 1889 # repo (local filesystem, old ssh servers).
1882 1890 #
1883 1891 # unbundle assumes local user cannot lock remote repo (new ssh
1884 1892 # servers, http servers).
1885 1893
1886 1894 if not remote.canpush():
1887 1895 raise util.Abort(_("destination does not support push"))
1888 1896 unfi = self.unfiltered()
1889 1897 # get local lock as we might write phase data
1890 1898 locallock = self.lock()
1891 1899 try:
1892 1900 self.checkpush(force, revs)
1893 1901 lock = None
1894 1902 unbundle = remote.capable('unbundle')
1895 1903 if not unbundle:
1896 1904 lock = remote.lock()
1897 1905 try:
1898 1906 # discovery
1899 1907 fci = discovery.findcommonincoming
1900 1908 commoninc = fci(unfi, remote, force=force)
1901 1909 common, inc, remoteheads = commoninc
1902 1910 fco = discovery.findcommonoutgoing
1903 1911 outgoing = fco(unfi, remote, onlyheads=revs,
1904 1912 commoninc=commoninc, force=force)
1905 1913
1906 1914
1907 1915 if not outgoing.missing:
1908 1916 # nothing to push
1909 1917 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1910 1918 ret = None
1911 1919 else:
1912 1920 # something to push
1913 1921 if not force:
1914 1922 # if self.obsstore is empty --> no obsolete markers,
1915 1923 # so skip the iteration
1916 1924 if unfi.obsstore:
1917 1925 # these messages are defined here to stay within the 80-char limit
1918 1926 mso = _("push includes obsolete changeset: %s!")
1919 1927 msu = _("push includes unstable changeset: %s!")
1920 1928 msb = _("push includes bumped changeset: %s!")
1921 1929 msd = _("push includes divergent changeset: %s!")
1922 1930 # If we are about to push and there is at least one
1923 1931 # obsolete or unstable changeset in missing, at
1924 1932 # least one of the missing heads will be obsolete or
1925 1933 # unstable. So checking heads only is enough
1926 1934 for node in outgoing.missingheads:
1927 1935 ctx = unfi[node]
1928 1936 if ctx.obsolete():
1929 1937 raise util.Abort(mso % ctx)
1930 1938 elif ctx.unstable():
1931 1939 raise util.Abort(msu % ctx)
1932 1940 elif ctx.bumped():
1933 1941 raise util.Abort(msb % ctx)
1934 1942 elif ctx.divergent():
1935 1943 raise util.Abort(msd % ctx)
1936 1944 discovery.checkheads(unfi, remote, outgoing,
1937 1945 remoteheads, newbranch,
1938 1946 bool(inc))
1939 1947
1940 1948 # create a changegroup from local
1941 1949 if revs is None and not outgoing.excluded:
1942 1950 # push everything,
1943 1951 # use the fast path, no race possible on push
1944 1952 cg = self._changegroup(outgoing.missing, 'push')
1945 1953 else:
1946 1954 cg = self.getlocalbundle('push', outgoing)
1947 1955
1948 1956 # apply changegroup to remote
1949 1957 if unbundle:
1950 1958 # local repo finds heads on server, finds out what
1951 1959 # revs it must push. once revs transferred, if server
1952 1960 # finds it has different heads (someone else won
1953 1961 # commit/push race), server aborts.
1954 1962 if force:
1955 1963 remoteheads = ['force']
1956 1964 # ssh: return remote's addchangegroup()
1957 1965 # http: return remote's addchangegroup() or 0 for error
1958 1966 ret = remote.unbundle(cg, remoteheads, 'push')
1959 1967 else:
1960 1968 # we return an integer indicating remote head count
1961 1969 # change
1962 1970 ret = remote.addchangegroup(cg, 'push', self.url())
1963 1971
1964 1972 if ret:
1965 1973 # push succeeded, synchronize the target of the push
1966 1974 cheads = outgoing.missingheads
1967 1975 elif revs is None:
1968 1976 # All-out push failed. synchronize on all common heads
1969 1977 cheads = outgoing.commonheads
1970 1978 else:
1971 1979 # I want cheads = heads(::missingheads and ::commonheads)
1972 1980 # (missingheads is revs with secret changeset filtered out)
1973 1981 #
1974 1982 # This can be expressed as:
1975 1983 # cheads = ( (missingheads and ::commonheads)
1976 1984 # + (commonheads and ::missingheads))"
1977 1985 # )
1978 1986 #
1979 1987 # while trying to push we already computed the following:
1980 1988 # common = (::commonheads)
1981 1989 # missing = ((commonheads::missingheads) - commonheads)
1982 1990 #
1983 1991 # We can pick:
1984 1992 # * missingheads part of common (::commonheads)
1985 1993 common = set(outgoing.common)
1986 1994 cheads = [node for node in revs if node in common]
1987 1995 # and
1988 1996 # * commonheads parents on missing
1989 1997 revset = unfi.set('%ln and parents(roots(%ln))',
1990 1998 outgoing.commonheads,
1991 1999 outgoing.missing)
1992 2000 cheads.extend(c.node() for c in revset)
1993 2001 # even when we don't push, exchanging phase data is useful
1994 2002 remotephases = remote.listkeys('phases')
1995 2003 if not remotephases: # old server or public only repo
1996 2004 phases.advanceboundary(self, phases.public, cheads)
1997 2005 # don't push any phase data as there is nothing to push
1998 2006 else:
1999 2007 ana = phases.analyzeremotephases(self, cheads, remotephases)
2000 2008 pheads, droots = ana
2001 2009 ### Apply remote phase on local
2002 2010 if remotephases.get('publishing', False):
2003 2011 phases.advanceboundary(self, phases.public, cheads)
2004 2012 else: # publish = False
2005 2013 phases.advanceboundary(self, phases.public, pheads)
2006 2014 phases.advanceboundary(self, phases.draft, cheads)
2007 2015 ### Apply local phase on remote
2008 2016
2009 2017 # Get the list of all revs that are draft on remote but public here.
2010 2018 # XXX Beware that the revset breaks if droots is not strictly
2011 2019 # XXX roots; we may want to ensure it is, but that is costly
2012 2020 outdated = unfi.set('heads((%ln::%ln) and public())',
2013 2021 droots, cheads)
2014 2022 for newremotehead in outdated:
2015 2023 r = remote.pushkey('phases',
2016 2024 newremotehead.hex(),
2017 2025 str(phases.draft),
2018 2026 str(phases.public))
2019 2027 if not r:
2020 2028 self.ui.warn(_('updating %s to public failed!\n')
2021 2029 % newremotehead)
2022 2030 self.ui.debug('try to push obsolete markers to remote\n')
2023 2031 if (obsolete._enabled and self.obsstore and
2024 2032 'obsolete' in remote.listkeys('namespaces')):
2025 2033 rslts = []
2026 2034 remotedata = self.listkeys('obsolete')
2027 2035 for key in sorted(remotedata, reverse=True):
2028 2036 # reverse sort to ensure we end with dump0
2029 2037 data = remotedata[key]
2030 2038 rslts.append(remote.pushkey('obsolete', key, '', data))
2031 2039 if [r for r in rslts if not r]:
2032 2040 msg = _('failed to push some obsolete markers!\n')
2033 2041 self.ui.warn(msg)
2034 2042 finally:
2035 2043 if lock is not None:
2036 2044 lock.release()
2037 2045 finally:
2038 2046 locallock.release()
2039 2047
2040 2048 self.ui.debug("checking for updated bookmarks\n")
2041 2049 rb = remote.listkeys('bookmarks')
2042 2050 for k in rb.keys():
2043 2051 if k in unfi._bookmarks:
2044 2052 nr, nl = rb[k], hex(self._bookmarks[k])
2045 2053 if nr in unfi:
2046 2054 cr = unfi[nr]
2047 2055 cl = unfi[nl]
2048 2056 if bookmarks.validdest(unfi, cr, cl):
2049 2057 r = remote.pushkey('bookmarks', k, nr, nl)
2050 2058 if r:
2051 2059 self.ui.status(_("updating bookmark %s\n") % k)
2052 2060 else:
2053 2061 self.ui.warn(_('updating bookmark %s'
2054 2062 ' failed!\n') % k)
2055 2063
2056 2064 return ret
2057 2065
2058 2066 def changegroupinfo(self, nodes, source):
2059 2067 if self.ui.verbose or source == 'bundle':
2060 2068 self.ui.status(_("%d changesets found\n") % len(nodes))
2061 2069 if self.ui.debugflag:
2062 2070 self.ui.debug("list of changesets:\n")
2063 2071 for node in nodes:
2064 2072 self.ui.debug("%s\n" % hex(node))
2065 2073
2066 2074 def changegroupsubset(self, bases, heads, source):
2067 2075 """Compute a changegroup consisting of all the nodes that are
2068 2076 descendants of any of the bases and ancestors of any of the heads.
2069 2077 Return a chunkbuffer object whose read() method will return
2070 2078 successive changegroup chunks.
2071 2079
2072 2080 It is fairly complex as determining which filenodes and which
2073 2081 manifest nodes need to be included for the changeset to be complete
2074 2082 is non-trivial.
2075 2083
2076 2084 Another wrinkle is doing the reverse, figuring out which changeset in
2077 2085 the changegroup a particular filenode or manifestnode belongs to.
2078 2086 """
2079 2087 cl = self.changelog
2080 2088 if not bases:
2081 2089 bases = [nullid]
2082 2090 csets, bases, heads = cl.nodesbetween(bases, heads)
2083 2091 # We assume that all ancestors of bases are known
2084 2092 common = cl.ancestors([cl.rev(n) for n in bases])
2085 2093 return self._changegroupsubset(common, csets, heads, source)
2086 2094
2087 2095 def getlocalbundle(self, source, outgoing):
2088 2096 """Like getbundle, but taking a discovery.outgoing as an argument.
2089 2097
2090 2098 This is only implemented for local repos and reuses potentially
2091 2099 precomputed sets in outgoing."""
2092 2100 if not outgoing.missing:
2093 2101 return None
2094 2102 return self._changegroupsubset(outgoing.common,
2095 2103 outgoing.missing,
2096 2104 outgoing.missingheads,
2097 2105 source)
2098 2106
2099 2107 def getbundle(self, source, heads=None, common=None):
2100 2108 """Like changegroupsubset, but returns the set difference between the
2101 2109 ancestors of heads and the ancestors of common.
2102 2110
2103 2111 If heads is None, use the local heads. If common is None, use [nullid].
2104 2112
2105 2113 The nodes in common might not all be known locally due to the way the
2106 2114 current discovery protocol works.
2107 2115 """
2108 2116 cl = self.changelog
2109 2117 if common:
2110 2118 hasnode = cl.hasnode
2111 2119 common = [n for n in common if hasnode(n)]
2112 2120 else:
2113 2121 common = [nullid]
2114 2122 if not heads:
2115 2123 heads = cl.heads()
2116 2124 return self.getlocalbundle(source,
2117 2125 discovery.outgoing(cl, common, heads))
2118 2126
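# --- illustrative sketch, not part of localrepo.py ---
# A hedged sketch combining the helpers above (peer URL and output file name
# are hypothetical): compute what a remote is missing, then write the
# resulting changegroup out as a bundle file.
from mercurial import hg, discovery, changegroup

other = hg.peer(repo, {}, 'http://example.com/hgrepo')
outgoing = discovery.findcommonoutgoing(repo, other)
cg = repo.getlocalbundle('bundle', outgoing)
if cg is not None:
    changegroup.writebundle(cg, 'outgoing.hg', 'HG10BZ')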
2119 2127 @unfilteredmethod
2120 2128 def _changegroupsubset(self, commonrevs, csets, heads, source):
2121 2129
2122 2130 cl = self.changelog
2123 2131 mf = self.manifest
2124 2132 mfs = {} # needed manifests
2125 2133 fnodes = {} # needed file nodes
2126 2134 changedfiles = set()
2127 2135 fstate = ['', {}]
2128 2136 count = [0, 0]
2129 2137
2130 2138 # can we go through the fast path ?
2131 2139 heads.sort()
2132 2140 if heads == sorted(self.heads()):
2133 2141 return self._changegroup(csets, source)
2134 2142
2135 2143 # slow path
2136 2144 self.hook('preoutgoing', throw=True, source=source)
2137 2145 self.changegroupinfo(csets, source)
2138 2146
2139 2147 # filter any nodes that claim to be part of the known set
2140 2148 def prune(revlog, missing):
2141 2149 rr, rl = revlog.rev, revlog.linkrev
2142 2150 return [n for n in missing
2143 2151 if rl(rr(n)) not in commonrevs]
2144 2152
2145 2153 progress = self.ui.progress
2146 2154 _bundling = _('bundling')
2147 2155 _changesets = _('changesets')
2148 2156 _manifests = _('manifests')
2149 2157 _files = _('files')
2150 2158
2151 2159 def lookup(revlog, x):
2152 2160 if revlog == cl:
2153 2161 c = cl.read(x)
2154 2162 changedfiles.update(c[3])
2155 2163 mfs.setdefault(c[0], x)
2156 2164 count[0] += 1
2157 2165 progress(_bundling, count[0],
2158 2166 unit=_changesets, total=count[1])
2159 2167 return x
2160 2168 elif revlog == mf:
2161 2169 clnode = mfs[x]
2162 2170 mdata = mf.readfast(x)
2163 2171 for f, n in mdata.iteritems():
2164 2172 if f in changedfiles:
2165 2173 fnodes[f].setdefault(n, clnode)
2166 2174 count[0] += 1
2167 2175 progress(_bundling, count[0],
2168 2176 unit=_manifests, total=count[1])
2169 2177 return clnode
2170 2178 else:
2171 2179 progress(_bundling, count[0], item=fstate[0],
2172 2180 unit=_files, total=count[1])
2173 2181 return fstate[1][x]
2174 2182
2175 2183 bundler = changegroup.bundle10(lookup)
2176 2184 reorder = self.ui.config('bundle', 'reorder', 'auto')
2177 2185 if reorder == 'auto':
2178 2186 reorder = None
2179 2187 else:
2180 2188 reorder = util.parsebool(reorder)
2181 2189
2182 2190 def gengroup():
2183 2191 # Create a changenode group generator that will call our functions
2184 2192 # back to lookup the owning changenode and collect information.
2185 2193 count[:] = [0, len(csets)]
2186 2194 for chunk in cl.group(csets, bundler, reorder=reorder):
2187 2195 yield chunk
2188 2196 progress(_bundling, None)
2189 2197
2190 2198 # Create a generator for the manifestnodes that calls our lookup
2191 2199 # and data collection functions back.
2192 2200 for f in changedfiles:
2193 2201 fnodes[f] = {}
2194 2202 count[:] = [0, len(mfs)]
2195 2203 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2196 2204 yield chunk
2197 2205 progress(_bundling, None)
2198 2206
2199 2207 mfs.clear()
2200 2208
2201 2209 # Go through all our files in order sorted by name.
2202 2210 count[:] = [0, len(changedfiles)]
2203 2211 for fname in sorted(changedfiles):
2204 2212 filerevlog = self.file(fname)
2205 2213 if not len(filerevlog):
2206 2214 raise util.Abort(_("empty or missing revlog for %s")
2207 2215 % fname)
2208 2216 fstate[0] = fname
2209 2217 fstate[1] = fnodes.pop(fname, {})
2210 2218
2211 2219 nodelist = prune(filerevlog, fstate[1])
2212 2220 if nodelist:
2213 2221 count[0] += 1
2214 2222 yield bundler.fileheader(fname)
2215 2223 for chunk in filerevlog.group(nodelist, bundler, reorder):
2216 2224 yield chunk
2217 2225
2218 2226 # Signal that no more groups are left.
2219 2227 yield bundler.close()
2220 2228 progress(_bundling, None)
2221 2229
2222 2230 if csets:
2223 2231 self.hook('outgoing', node=hex(csets[0]), source=source)
2224 2232
2225 2233 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2226 2234
2227 2235 def changegroup(self, basenodes, source):
2228 2236 # to avoid a race we use changegroupsubset() (issue1320)
2229 2237 return self.changegroupsubset(basenodes, self.heads(), source)
2230 2238
2231 2239 @unfilteredmethod
2232 2240 def _changegroup(self, nodes, source):
2233 2241 """Compute the changegroup of all nodes that we have that a recipient
2234 2242 doesn't. Return a chunkbuffer object whose read() method will return
2235 2243 successive changegroup chunks.
2236 2244
2237 2245 This is much easier than the previous function as we can assume that
2238 2246 the recipient has any changenode we aren't sending them.
2239 2247
2240 2248 nodes is the set of nodes to send"""
2241 2249
2242 2250 cl = self.changelog
2243 2251 mf = self.manifest
2244 2252 mfs = {}
2245 2253 changedfiles = set()
2246 2254 fstate = ['']
2247 2255 count = [0, 0]
2248 2256
2249 2257 self.hook('preoutgoing', throw=True, source=source)
2250 2258 self.changegroupinfo(nodes, source)
2251 2259
2252 2260 revset = set([cl.rev(n) for n in nodes])
2253 2261
2254 2262 def gennodelst(log):
2255 2263 ln, llr = log.node, log.linkrev
2256 2264 return [ln(r) for r in log if llr(r) in revset]
2257 2265
2258 2266 progress = self.ui.progress
2259 2267 _bundling = _('bundling')
2260 2268 _changesets = _('changesets')
2261 2269 _manifests = _('manifests')
2262 2270 _files = _('files')
2263 2271
2264 2272 def lookup(revlog, x):
2265 2273 if revlog == cl:
2266 2274 c = cl.read(x)
2267 2275 changedfiles.update(c[3])
2268 2276 mfs.setdefault(c[0], x)
2269 2277 count[0] += 1
2270 2278 progress(_bundling, count[0],
2271 2279 unit=_changesets, total=count[1])
2272 2280 return x
2273 2281 elif revlog == mf:
2274 2282 count[0] += 1
2275 2283 progress(_bundling, count[0],
2276 2284 unit=_manifests, total=count[1])
2277 2285 return cl.node(revlog.linkrev(revlog.rev(x)))
2278 2286 else:
2279 2287 progress(_bundling, count[0], item=fstate[0],
2280 2288 total=count[1], unit=_files)
2281 2289 return cl.node(revlog.linkrev(revlog.rev(x)))
2282 2290
2283 2291 bundler = changegroup.bundle10(lookup)
2284 2292 reorder = self.ui.config('bundle', 'reorder', 'auto')
2285 2293 if reorder == 'auto':
2286 2294 reorder = None
2287 2295 else:
2288 2296 reorder = util.parsebool(reorder)
2289 2297
2290 2298 def gengroup():
2291 2299 '''yield a sequence of changegroup chunks (strings)'''
2292 2300 # construct a list of all changed files
2293 2301
2294 2302 count[:] = [0, len(nodes)]
2295 2303 for chunk in cl.group(nodes, bundler, reorder=reorder):
2296 2304 yield chunk
2297 2305 progress(_bundling, None)
2298 2306
2299 2307 count[:] = [0, len(mfs)]
2300 2308 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2301 2309 yield chunk
2302 2310 progress(_bundling, None)
2303 2311
2304 2312 count[:] = [0, len(changedfiles)]
2305 2313 for fname in sorted(changedfiles):
2306 2314 filerevlog = self.file(fname)
2307 2315 if not len(filerevlog):
2308 2316 raise util.Abort(_("empty or missing revlog for %s")
2309 2317 % fname)
2310 2318 fstate[0] = fname
2311 2319 nodelist = gennodelst(filerevlog)
2312 2320 if nodelist:
2313 2321 count[0] += 1
2314 2322 yield bundler.fileheader(fname)
2315 2323 for chunk in filerevlog.group(nodelist, bundler, reorder):
2316 2324 yield chunk
2317 2325 yield bundler.close()
2318 2326 progress(_bundling, None)
2319 2327
2320 2328 if nodes:
2321 2329 self.hook('outgoing', node=hex(nodes[0]), source=source)
2322 2330
2323 2331 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2324 2332
2325 2333 @unfilteredmethod
2326 2334 def addchangegroup(self, source, srctype, url, emptyok=False):
2327 2335 """Add the changegroup returned by source.read() to this repo.
2328 2336 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2329 2337 the URL of the repo where this changegroup is coming from.
2330 2338
2331 2339 Return an integer summarizing the change to this repo:
2332 2340 - nothing changed or no source: 0
2333 2341 - more heads than before: 1+added heads (2..n)
2334 2342 - fewer heads than before: -1-removed heads (-2..-n)
2335 2343 - number of heads stays the same: 1
2336 2344 """
2337 2345 def csmap(x):
2338 2346 self.ui.debug("add changeset %s\n" % short(x))
2339 2347 return len(cl)
2340 2348
2341 2349 def revmap(x):
2342 2350 return cl.rev(x)
2343 2351
2344 2352 if not source:
2345 2353 return 0
2346 2354
2347 2355 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2348 2356
2349 2357 changesets = files = revisions = 0
2350 2358 efiles = set()
2351 2359
2352 2360 # write changelog data to temp files so concurrent readers will not see
2353 2361 # an inconsistent view
2354 2362 cl = self.changelog
2355 2363 cl.delayupdate()
2356 2364 oldheads = cl.heads()
2357 2365
2358 2366 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2359 2367 try:
2360 2368 trp = weakref.proxy(tr)
2361 2369 # pull off the changeset group
2362 2370 self.ui.status(_("adding changesets\n"))
2363 2371 clstart = len(cl)
2364 2372 class prog(object):
2365 2373 step = _('changesets')
2366 2374 count = 1
2367 2375 ui = self.ui
2368 2376 total = None
2369 2377 def __call__(self):
2370 2378 self.ui.progress(self.step, self.count, unit=_('chunks'),
2371 2379 total=self.total)
2372 2380 self.count += 1
2373 2381 pr = prog()
2374 2382 source.callback = pr
2375 2383
2376 2384 source.changelogheader()
2377 2385 srccontent = cl.addgroup(source, csmap, trp)
2378 2386 if not (srccontent or emptyok):
2379 2387 raise util.Abort(_("received changelog group is empty"))
2380 2388 clend = len(cl)
2381 2389 changesets = clend - clstart
2382 2390 for c in xrange(clstart, clend):
2383 2391 efiles.update(self[c].files())
2384 2392 efiles = len(efiles)
2385 2393 self.ui.progress(_('changesets'), None)
2386 2394
2387 2395 # pull off the manifest group
2388 2396 self.ui.status(_("adding manifests\n"))
2389 2397 pr.step = _('manifests')
2390 2398 pr.count = 1
2391 2399 pr.total = changesets # manifests <= changesets
2392 2400 # no need to check for empty manifest group here:
2393 2401 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2394 2402 # no new manifest will be created and the manifest group will
2395 2403 # be empty during the pull
2396 2404 source.manifestheader()
2397 2405 self.manifest.addgroup(source, revmap, trp)
2398 2406 self.ui.progress(_('manifests'), None)
2399 2407
2400 2408 needfiles = {}
2401 2409 if self.ui.configbool('server', 'validate', default=False):
2402 2410 # validate incoming csets have their manifests
2403 2411 for cset in xrange(clstart, clend):
2404 2412 mfest = self.changelog.read(self.changelog.node(cset))[0]
2405 2413 mfest = self.manifest.readdelta(mfest)
2406 2414 # store file nodes we must see
2407 2415 for f, n in mfest.iteritems():
2408 2416 needfiles.setdefault(f, set()).add(n)
2409 2417
2410 2418 # process the files
2411 2419 self.ui.status(_("adding file changes\n"))
2412 2420 pr.step = _('files')
2413 2421 pr.count = 1
2414 2422 pr.total = efiles
2415 2423 source.callback = None
2416 2424
2417 2425 while True:
2418 2426 chunkdata = source.filelogheader()
2419 2427 if not chunkdata:
2420 2428 break
2421 2429 f = chunkdata["filename"]
2422 2430 self.ui.debug("adding %s revisions\n" % f)
2423 2431 pr()
2424 2432 fl = self.file(f)
2425 2433 o = len(fl)
2426 2434 if not fl.addgroup(source, revmap, trp):
2427 2435 raise util.Abort(_("received file revlog group is empty"))
2428 2436 revisions += len(fl) - o
2429 2437 files += 1
2430 2438 if f in needfiles:
2431 2439 needs = needfiles[f]
2432 2440 for new in xrange(o, len(fl)):
2433 2441 n = fl.node(new)
2434 2442 if n in needs:
2435 2443 needs.remove(n)
2436 2444 if not needs:
2437 2445 del needfiles[f]
2438 2446 self.ui.progress(_('files'), None)
2439 2447
2440 2448 for f, needs in needfiles.iteritems():
2441 2449 fl = self.file(f)
2442 2450 for n in needs:
2443 2451 try:
2444 2452 fl.rev(n)
2445 2453 except error.LookupError:
2446 2454 raise util.Abort(
2447 2455 _('missing file data for %s:%s - run hg verify') %
2448 2456 (f, hex(n)))
2449 2457
2450 2458 dh = 0
2451 2459 if oldheads:
2452 2460 heads = cl.heads()
2453 2461 dh = len(heads) - len(oldheads)
2454 2462 for h in heads:
2455 2463 if h not in oldheads and self[h].closesbranch():
2456 2464 dh -= 1
2457 2465 htext = ""
2458 2466 if dh:
2459 2467 htext = _(" (%+d heads)") % dh
2460 2468
2461 2469 self.ui.status(_("added %d changesets"
2462 2470 " with %d changes to %d files%s\n")
2463 2471 % (changesets, revisions, files, htext))
2464 2472 obsolete.clearobscaches(self)
2465 2473
2466 2474 if changesets > 0:
2467 2475 p = lambda: cl.writepending() and self.root or ""
2468 2476 self.hook('pretxnchangegroup', throw=True,
2469 2477 node=hex(cl.node(clstart)), source=srctype,
2470 2478 url=url, pending=p)
2471 2479
2472 2480 added = [cl.node(r) for r in xrange(clstart, clend)]
2473 2481 publishing = self.ui.configbool('phases', 'publish', True)
2474 2482 if srctype == 'push':
2475 2483 # Old servers can not push the boundary themselves.
2476 2484 # New servers won't push the boundary if the changeset already
2477 2485 # existed locally as secret
2478 2486 #
2479 2487 # We should not use added here but the list of all changes in
2480 2488 # the bundle
2481 2489 if publishing:
2482 2490 phases.advanceboundary(self, phases.public, srccontent)
2483 2491 else:
2484 2492 phases.advanceboundary(self, phases.draft, srccontent)
2485 2493 phases.retractboundary(self, phases.draft, added)
2486 2494 elif srctype != 'strip':
2487 2495 # publishing only alters behavior during push
2488 2496 #
2489 2497 # strip should not touch boundary at all
2490 2498 phases.retractboundary(self, phases.draft, added)
2491 2499
2492 2500 # make changelog see real files again
2493 2501 cl.finalize(trp)
2494 2502
2495 2503 tr.close()
2496 2504
2497 2505 if changesets > 0:
2498 2506 self.updatebranchcache()
2499 2507 def runhooks():
2500 2508 # forcefully update the on-disk branch cache
2501 2509 self.ui.debug("updating the branch cache\n")
2502 2510 self.hook("changegroup", node=hex(cl.node(clstart)),
2503 2511 source=srctype, url=url)
2504 2512
2505 2513 for n in added:
2506 2514 self.hook("incoming", node=hex(n), source=srctype,
2507 2515 url=url)
2508 2516 self._afterlock(runhooks)
2509 2517
2510 2518 finally:
2511 2519 tr.release()
2512 2520 # never return 0 here:
2513 2521 if dh < 0:
2514 2522 return dh - 1
2515 2523 else:
2516 2524 return dh + 1
2517 2525
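# --- illustrative sketch, not part of localrepo.py ---
# A hedged example of decoding the return value documented in addchangegroup()
# above; pull() forwards that value, so the same decoding applies after a pull
# (the repo and peer objects are assumed to exist).
ret = repo.pull(other)
if ret == 0:
    print 'no changes found'
elif ret == 1:
    print 'changesets added, head count unchanged'
elif ret > 1:
    print '%d new heads' % (ret - 1)
else:
    print '%d heads removed' % (-ret - 1)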
2518 2526 def stream_in(self, remote, requirements):
2519 2527 lock = self.lock()
2520 2528 try:
2521 2529 # Save remote branchmap. We will use it later
2522 2530 # to speed up branchcache creation
2523 2531 rbranchmap = None
2524 2532 if remote.capable("branchmap"):
2525 2533 rbranchmap = remote.branchmap()
2526 2534
2527 2535 fp = remote.stream_out()
2528 2536 l = fp.readline()
2529 2537 try:
2530 2538 resp = int(l)
2531 2539 except ValueError:
2532 2540 raise error.ResponseError(
2533 2541 _('unexpected response from remote server:'), l)
2534 2542 if resp == 1:
2535 2543 raise util.Abort(_('operation forbidden by server'))
2536 2544 elif resp == 2:
2537 2545 raise util.Abort(_('locking the remote repository failed'))
2538 2546 elif resp != 0:
2539 2547 raise util.Abort(_('the server sent an unknown error code'))
2540 2548 self.ui.status(_('streaming all changes\n'))
2541 2549 l = fp.readline()
2542 2550 try:
2543 2551 total_files, total_bytes = map(int, l.split(' ', 1))
2544 2552 except (ValueError, TypeError):
2545 2553 raise error.ResponseError(
2546 2554 _('unexpected response from remote server:'), l)
2547 2555 self.ui.status(_('%d files to transfer, %s of data\n') %
2548 2556 (total_files, util.bytecount(total_bytes)))
2549 2557 handled_bytes = 0
2550 2558 self.ui.progress(_('clone'), 0, total=total_bytes)
2551 2559 start = time.time()
2552 2560 for i in xrange(total_files):
2553 2561 # XXX doesn't support '\n' or '\r' in filenames
2554 2562 l = fp.readline()
2555 2563 try:
2556 2564 name, size = l.split('\0', 1)
2557 2565 size = int(size)
2558 2566 except (ValueError, TypeError):
2559 2567 raise error.ResponseError(
2560 2568 _('unexpected response from remote server:'), l)
2561 2569 if self.ui.debugflag:
2562 2570 self.ui.debug('adding %s (%s)\n' %
2563 2571 (name, util.bytecount(size)))
2564 2572 # for backwards compat, name was partially encoded
2565 2573 ofp = self.sopener(store.decodedir(name), 'w')
2566 2574 for chunk in util.filechunkiter(fp, limit=size):
2567 2575 handled_bytes += len(chunk)
2568 2576 self.ui.progress(_('clone'), handled_bytes,
2569 2577 total=total_bytes)
2570 2578 ofp.write(chunk)
2571 2579 ofp.close()
2572 2580 elapsed = time.time() - start
2573 2581 if elapsed <= 0:
2574 2582 elapsed = 0.001
2575 2583 self.ui.progress(_('clone'), None)
2576 2584 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2577 2585 (util.bytecount(total_bytes), elapsed,
2578 2586 util.bytecount(total_bytes / elapsed)))
2579 2587
2580 2588 # new requirements = old non-format requirements +
2581 2589 # new format-related
2582 2590 # requirements from the streamed-in repository
2583 2591 requirements.update(set(self.requirements) - self.supportedformats)
2584 2592 self._applyrequirements(requirements)
2585 2593 self._writerequirements()
2586 2594
2587 2595 if rbranchmap:
2588 2596 rbheads = []
2589 2597 for bheads in rbranchmap.itervalues():
2590 2598 rbheads.extend(bheads)
2591 2599
2592 2600 self.branchcache = rbranchmap
2593 2601 if rbheads:
2594 2602 rtiprev = max((int(self.changelog.rev(node))
2595 2603 for node in rbheads))
2596 2604 self._writebranchcache(self.branchcache,
2597 2605 self[rtiprev].node(), rtiprev)
2598 2606 self.invalidate()
2599 2607 return len(self.heads()) + 1
2600 2608 finally:
2601 2609 lock.release()
2602 2610
2603 2611 def clone(self, remote, heads=[], stream=False):
2604 2612 '''clone remote repository.
2605 2613
2606 2614 keyword arguments:
2607 2615 heads: list of revs to clone (forces use of pull)
2608 2616 stream: use streaming clone if possible'''
2609 2617
2610 2618 # now, all clients that can request uncompressed clones can
2611 2619 # read repo formats supported by all servers that can serve
2612 2620 # them.
2613 2621
2614 2622 # if revlog format changes, client will have to check version
2615 2623 # and format flags on "stream" capability, and use
2616 2624 # uncompressed only if compatible.
2617 2625
2618 2626 if not stream:
2619 2627 # if the server explicitly prefers to stream (for fast LANs)
2620 2628 stream = remote.capable('stream-preferred')
2621 2629
2622 2630 if stream and not heads:
2623 2631 # 'stream' means remote revlog format is revlogv1 only
2624 2632 if remote.capable('stream'):
2625 2633 return self.stream_in(remote, set(('revlogv1',)))
2626 2634 # otherwise, 'streamreqs' contains the remote revlog format
2627 2635 streamreqs = remote.capable('streamreqs')
2628 2636 if streamreqs:
2629 2637 streamreqs = set(streamreqs.split(','))
2630 2638 # if we support it, stream in and adjust our requirements
2631 2639 if not streamreqs - self.supportedformats:
2632 2640 return self.stream_in(remote, streamreqs)
2633 2641 return self.pull(remote, heads)
2634 2642
2635 2643 def pushkey(self, namespace, key, old, new):
2636 2644 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2637 2645 old=old, new=new)
2638 2646 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2639 2647 ret = pushkey.push(self, namespace, key, old, new)
2640 2648 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2641 2649 ret=ret)
2642 2650 return ret
2643 2651
2644 2652 def listkeys(self, namespace):
2645 2653 self.hook('prelistkeys', throw=True, namespace=namespace)
2646 2654 self.ui.debug('listing keys for "%s"\n' % namespace)
2647 2655 values = pushkey.list(self, namespace)
2648 2656 self.hook('listkeys', namespace=namespace, values=values)
2649 2657 return values
2650 2658
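# --- illustrative sketch, not part of localrepo.py ---
# A hedged usage example for the pushkey/listkeys pair above, the generic
# key-value protocol used for phases, bookmarks and obsolescence markers
# (the peer object 'other' is hypothetical).
print repo.listkeys('bookmarks')   # {bookmark name: hex node}
print other.listkeys('phases')     # e.g. {'publishing': 'True'}
# advertise a local bookmark to the remote; old value '' means "create"
ok = other.pushkey('bookmarks', 'mybook', '', repo['tip'].hex())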
2651 2659 def debugwireargs(self, one, two, three=None, four=None, five=None):
2652 2660 '''used to test argument passing over the wire'''
2653 2661 return "%s %s %s %s %s" % (one, two, three, four, five)
2654 2662
2655 2663 def savecommitmessage(self, text):
2656 2664 fp = self.opener('last-message.txt', 'wb')
2657 2665 try:
2658 2666 fp.write(text)
2659 2667 finally:
2660 2668 fp.close()
2661 2669 return self.pathto(fp.name[len(self.root) + 1:])
2662 2670
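# --- illustrative sketch, not part of localrepo.py ---
# A hedged example of recovering the message saved by savecommitmessage()
# above, e.g. after a commit aborted by a pretxncommit hook:
msg = repo.opener('last-message.txt').read()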
2663 2671 # used to avoid circular references so destructors work
2664 2672 def aftertrans(files):
2665 2673 renamefiles = [tuple(t) for t in files]
2666 2674 def a():
2667 2675 for src, dest in renamefiles:
2668 2676 try:
2669 2677 util.rename(src, dest)
2670 2678 except OSError: # journal file does not yet exist
2671 2679 pass
2672 2680 return a
2673 2681
2674 2682 def undoname(fn):
2675 2683 base, name = os.path.split(fn)
2676 2684 assert name.startswith('journal')
2677 2685 return os.path.join(base, name.replace('journal', 'undo', 1))
2678 2686
2679 2687 def instance(ui, path, create):
2680 2688 return localrepository(ui, util.urllocalpath(path), create)
2681 2689
2682 2690 def islocal(path):
2683 2691 return True