branchmap: remove the dict interface from the branchcache class (API)...
Pulkit Goyal - r42168:624d6683 default
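The hunks below drop branchcache's inheritance from dict in favour of an internal entries dict plus explicit delegation methods, so former dict-isms such as len(cache) or cache.values() now go through named accessors. A minimal standalone sketch of that delegation pattern (a hypothetical class, not the Mercurial code; items() is used to keep the sketch runnable on Python 3 where the diff keeps Python 2's iteritems()):

    class delegatingcache(object):
        """Wrap a plain dict behind an explicit, named interface."""
        def __init__(self, entries=()):
            self.entries = dict(entries)   # the data now lives in one attribute

        def __getitem__(self, key):        # indexing is still supported...
            return self.entries[key]

        def __setitem__(self, key, value):
            self.entries[key] = value

        def iteritems(self):               # ...but iteration and sizing are
            return self.entries.items()    # explicit delegations, not inheritance

    cache = delegatingcache({'default': ['abc123']})
    cache['stable'] = ['def456']
    print(sorted(cache.iteritems()))       # [('default', ...), ('stable', ...)]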
@@ -1,600 +1,618
1 1 # branchmap.py - logic to compute, maintain and store branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
29 29 calcsize = struct.calcsize
30 30 pack_into = struct.pack_into
31 31 unpack_from = struct.unpack_from
32 32
33 33
34 34 ### Nearest subset relation
35 35 # Nearest subset of filter X is a filter Y so that:
36 36 # * Y is included in X,
37 37 # * X - Y is as small as possible.
38 38 # This creates an ordering used for branchmap purposes.
39 39 # The ordering may be partial.
40 40 subsettable = {None: 'visible',
41 41 'visible-hidden': 'visible',
42 42 'visible': 'served',
43 43 'served': 'immutable',
44 44 'immutable': 'base'}
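For illustration, the nearest-subset relation above can be walked from any filter name down to 'base'; a minimal sketch, not part of this change:

    def subsetchain(filtername):
        # follow subsettable until a filter has no registered subset
        chain = [filtername]
        while filtername in subsettable:
            filtername = subsettable[filtername]
            chain.append(filtername)
        return chain

    # subsetchain('visible') -> ['visible', 'served', 'immutable', 'base']

This is the chain that updatecache() below falls back along when no fresh cache exists for a filtered view.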
45 45
46 46
47 47 class BranchMapCache(object):
48 48 """mapping of filtered views of repo with their branchcache"""
49 49 def __init__(self):
50 50 self._per_filter = {}
51 51
52 52 def __getitem__(self, repo):
53 53 self.updatecache(repo)
54 54 return self._per_filter[repo.filtername]
55 55
56 56 def updatecache(self, repo):
57 57 """Update the cache for the given filtered view on a repository"""
58 58 # This can trigger updates for the caches for subsets of the filtered
59 59 # view, e.g. when there is no cache for this filtered view or the cache
60 60 # is stale.
61 61
62 62 cl = repo.changelog
63 63 filtername = repo.filtername
64 64 bcache = self._per_filter.get(filtername)
65 65 if bcache is None or not bcache.validfor(repo):
66 66 # cache object missing or cache object stale? Read from disk
67 67 bcache = branchcache.fromfile(repo)
68 68
69 69 revs = []
70 70 if bcache is None:
71 71 # no (fresh) cache available anymore, perhaps we can re-use
72 72 # the cache for a subset, then extend that to add info on missing
73 73 # revisions.
74 74 subsetname = subsettable.get(filtername)
75 75 if subsetname is not None:
76 76 subset = repo.filtered(subsetname)
77 77 bcache = self[subset].copy()
78 78 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
79 79 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
80 80 else:
81 81 # nothing to fall back on, start empty.
82 82 bcache = branchcache()
83 83
84 84 revs.extend(cl.revs(start=bcache.tiprev + 1))
85 85 if revs:
86 86 bcache.update(repo, revs)
87 87
88 88 assert bcache.validfor(repo), filtername
89 89 self._per_filter[repo.filtername] = bcache
90 90
91 91 def replace(self, repo, remotebranchmap):
92 92 """Replace the branchmap cache for a repo with a branch mapping.
93 93
94 94 This is likely only called during clone with a branch map from a
95 95 remote.
96 96
97 97 """
98 98 cl = repo.changelog
99 99 clrev = cl.rev
100 100 clbranchinfo = cl.branchinfo
101 101 rbheads = []
102 102 closed = []
103 103 for bheads in remotebranchmap.itervalues():
104 104 rbheads += bheads
105 105 for h in bheads:
106 106 r = clrev(h)
107 107 b, c = clbranchinfo(r)
108 108 if c:
109 109 closed.append(h)
110 110
111 111 if rbheads:
112 112 rtiprev = max((int(clrev(node)) for node in rbheads))
113 113 cache = branchcache(
114 114 remotebranchmap, repo[rtiprev].node(), rtiprev,
115 115 closednodes=closed)
116 116
117 117 # Try to stick it as low as possible
118 118 # filters above "served" are unlikely to be fetched by a clone
119 119 for candidate in ('base', 'immutable', 'served'):
120 120 rview = repo.filtered(candidate)
121 121 if cache.validfor(rview):
122 122 self._per_filter[candidate] = cache
123 123 cache.write(rview)
124 124 return
125 125
126 126 def clear(self):
127 127 self._per_filter.clear()
128 128
129 129
130 class branchcache(dict):
130 class branchcache(object):
131 131 """A dict like object that hold branches heads cache.
132 132
133 133 This cache is used to avoid costly computations to determine all the
134 134 branch heads of a repo.
135 135
136 136 The cache is serialized on disk in the following format:
137 137
138 138 <tip hex node> <tip rev number> [optional filtered repo hex hash]
139 139 <branch head hex node> <open/closed state> <branch name>
140 140 <branch head hex node> <open/closed state> <branch name>
141 141 ...
142 142
143 143 The first line is used to check if the cache is still valid. If the
144 144 branch cache is for a filtered repo view, an optional third hash is
145 145 included that hashes the hashes of all filtered revisions.
146 146
147 147 The open/closed state is represented by a single letter 'o' or 'c'.
148 148 This field can be used to avoid changelog reads when determining if a
149 149 branch head closes a branch or not.
150 150 """
151 151
152 152 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
153 153 filteredhash=None, closednodes=None):
154 super(branchcache, self).__init__(entries)
155 154 self.tipnode = tipnode
156 155 self.tiprev = tiprev
157 156 self.filteredhash = filteredhash
158 157 # closednodes is a set of nodes that close their branch. If the branch
159 158 # cache has been updated, it may contain nodes that are no longer
160 159 # heads.
161 160 if closednodes is None:
162 161 self._closednodes = set()
163 162 else:
164 163 self._closednodes = closednodes
164 self.entries = dict(entries)
165
166 def __iter__(self):
167 return iter(self.entries)
168
169 def __setitem__(self, key, value):
170 self.entries[key] = value
171
172 def __getitem__(self, key):
173 return self.entries[key]
174
175 def setdefault(self, *args):
176 return self.entries.setdefault(*args)
177
178 def iteritems(self):
179 return self.entries.iteritems()
180
181 def itervalues(self):
182 return self.entries.itervalues()
165 183
166 184 @classmethod
167 185 def fromfile(cls, repo):
168 186 f = None
169 187 try:
170 188 f = repo.cachevfs(cls._filename(repo))
171 189 lineiter = iter(f)
172 190 cachekey = next(lineiter).rstrip('\n').split(" ", 2)
173 191 last, lrev = cachekey[:2]
174 192 last, lrev = bin(last), int(lrev)
175 193 filteredhash = None
176 194 if len(cachekey) > 2:
177 195 filteredhash = bin(cachekey[2])
178 196 bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
179 197 if not bcache.validfor(repo):
180 198 # invalidate the cache
181 199 raise ValueError(r'tip differs')
182 200 bcache.load(repo, lineiter)
183 201 except (IOError, OSError):
184 202 return None
185 203
186 204 except Exception as inst:
187 205 if repo.ui.debugflag:
188 206 msg = 'invalid branchheads cache'
189 207 if repo.filtername is not None:
190 208 msg += ' (%s)' % repo.filtername
191 209 msg += ': %s\n'
192 210 repo.ui.debug(msg % pycompat.bytestr(inst))
193 211 bcache = None
194 212
195 213 finally:
196 214 if f:
197 215 f.close()
198 216
199 217 return bcache
200 218
201 219 def load(self, repo, lineiter):
202 220 """ fully loads the branchcache by reading from the file using the line
203 221 iterator passed"""
204 222 cl = repo.changelog
205 223 for line in lineiter:
206 224 line = line.rstrip('\n')
207 225 if not line:
208 226 continue
209 227 node, state, label = line.split(" ", 2)
210 228 if state not in 'oc':
211 229 raise ValueError(r'invalid branch state')
212 230 label = encoding.tolocal(label.strip())
213 231 node = bin(node)
214 232 if not cl.hasnode(node):
215 233 raise ValueError(
216 234 r'node %s does not exist' % pycompat.sysstr(hex(node)))
217 235 self.setdefault(label, []).append(node)
218 236 if state == 'c':
219 237 self._closednodes.add(node)
220 238
221 239 @staticmethod
222 240 def _filename(repo):
223 241 """name of a branchcache file for a given repo or repoview"""
224 242 filename = "branch2"
225 243 if repo.filtername:
226 244 filename = '%s-%s' % (filename, repo.filtername)
227 245 return filename
228 246
229 247 def validfor(self, repo):
230 248 """Is the cache content valid regarding a repo
231 249
232 250 - False when cached tipnode is unknown or if we detect a strip.
233 251 - True when cache is up to date or a subset of current repo."""
234 252 try:
235 253 return ((self.tipnode == repo.changelog.node(self.tiprev))
236 254 and (self.filteredhash ==
237 255 scmutil.filteredhash(repo, self.tiprev)))
238 256 except IndexError:
239 257 return False
240 258
241 259 def _branchtip(self, heads):
242 260 '''Return tuple with last open head in heads and false,
243 261 otherwise return last closed head and true.'''
244 262 tip = heads[-1]
245 263 closed = True
246 264 for h in reversed(heads):
247 265 if h not in self._closednodes:
248 266 tip = h
249 267 closed = False
250 268 break
251 269 return tip, closed
252 270
253 271 def branchtip(self, branch):
254 272 '''Return the tipmost open head on branch, otherwise return the
255 273 tipmost closed head on branch.
256 274 Raise KeyError for unknown branch.'''
257 275 return self._branchtip(self[branch])[0]
258 276
259 277 def iteropen(self, nodes):
260 278 return (n for n in nodes if n not in self._closednodes)
261 279
262 280 def branchheads(self, branch, closed=False):
263 281 heads = self[branch]
264 282 if not closed:
265 283 heads = list(self.iteropen(heads))
266 284 return heads
267 285
268 286 def iterbranches(self):
269 287 for bn, heads in self.iteritems():
270 288 yield (bn, heads) + self._branchtip(heads)
271 289
272 290 def copy(self):
273 291 """return an deep copy of the branchcache object"""
274 return type(self)(
275 self, self.tipnode, self.tiprev, self.filteredhash,
292 return branchcache(
293 self.entries, self.tipnode, self.tiprev, self.filteredhash,
276 294 self._closednodes)
277 295
278 296 def write(self, repo):
279 297 try:
280 298 f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
281 299 cachekey = [hex(self.tipnode), '%d' % self.tiprev]
282 300 if self.filteredhash is not None:
283 301 cachekey.append(hex(self.filteredhash))
284 302 f.write(" ".join(cachekey) + '\n')
285 303 nodecount = 0
286 304 for label, nodes in sorted(self.iteritems()):
287 305 label = encoding.fromlocal(label)
288 306 for node in nodes:
289 307 nodecount += 1
290 308 if node in self._closednodes:
291 309 state = 'c'
292 310 else:
293 311 state = 'o'
294 312 f.write("%s %s %s\n" % (hex(node), state, label))
295 313 f.close()
296 314 repo.ui.log('branchcache',
297 315 'wrote %s branch cache with %d labels and %d nodes\n',
298 repo.filtername, len(self), nodecount)
316 repo.filtername, len(self.entries), nodecount)
299 317 except (IOError, OSError, error.Abort) as inst:
300 318 # Abort may be raised by read only opener, so log and continue
301 319 repo.ui.debug("couldn't write branch cache: %s\n" %
302 320 stringutil.forcebytestr(inst))
303 321
304 322 def update(self, repo, revgen):
305 323 """Given a branchhead cache, self, that may have extra nodes or be
306 324 missing heads, and a generator of nodes that are strictly a superset of
307 325 heads missing, this function updates self to be correct.
308 326 """
309 327 starttime = util.timer()
310 328 cl = repo.changelog
311 329 # collect new branch entries
312 330 newbranches = {}
313 331 getbranchinfo = repo.revbranchcache().branchinfo
314 332 for r in revgen:
315 333 branch, closesbranch = getbranchinfo(r)
316 334 newbranches.setdefault(branch, []).append(r)
317 335 if closesbranch:
318 336 self._closednodes.add(cl.node(r))
319 337
320 338 # fetch current topological heads to speed up filtering
321 339 topoheads = set(cl.headrevs())
322 340
323 341 # if older branchheads are reachable from new ones, they aren't
324 342 # really branchheads. Note checking parents is insufficient:
325 343 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
326 344 for branch, newheadrevs in newbranches.iteritems():
327 345 bheads = self.setdefault(branch, [])
328 346 bheadset = set(cl.rev(node) for node in bheads)
329 347
330 348 # This has been tested True on all internal usages of this function.
331 349 # Run it again in case of doubt:
332 350 # assert not (set(bheadrevs) & set(newheadrevs))
333 351 bheadset.update(newheadrevs)
334 352
335 353 # This prunes out two kinds of heads - heads that are superseded by
336 354 # a head in newheadrevs, and newheadrevs that are not heads because
337 355 # an existing head is their descendant.
338 356 uncertain = bheadset - topoheads
339 357 if uncertain:
340 358 floorrev = min(uncertain)
341 359 ancestors = set(cl.ancestors(newheadrevs, floorrev))
342 360 bheadset -= ancestors
343 361 bheadrevs = sorted(bheadset)
344 362 self[branch] = [cl.node(rev) for rev in bheadrevs]
345 363 tiprev = bheadrevs[-1]
346 364 if tiprev > self.tiprev:
347 365 self.tipnode = cl.node(tiprev)
348 366 self.tiprev = tiprev
349 367
350 368 if not self.validfor(repo):
351 369 # cache key is not valid anymore
352 370 self.tipnode = nullid
353 371 self.tiprev = nullrev
354 for heads in self.values():
372 for heads in self.itervalues():
355 373 tiprev = max(cl.rev(node) for node in heads)
356 374 if tiprev > self.tiprev:
357 375 self.tipnode = cl.node(tiprev)
358 376 self.tiprev = tiprev
359 377 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
360 378
361 379 duration = util.timer() - starttime
362 380 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
363 381 repo.filtername or b'None', duration)
364 382
365 383 self.write(repo)
366 384
367 385
368 386 class remotebranchcache(branchcache):
369 387 """Branchmap info for a remote connection, should not write locally"""
370 388 def write(self, repo):
371 389 pass
372 390
373 391
374 392 # Revision branch info cache
375 393
376 394 _rbcversion = '-v1'
377 395 _rbcnames = 'rbc-names' + _rbcversion
378 396 _rbcrevs = 'rbc-revs' + _rbcversion
379 397 # [4 byte hash prefix][4 byte branch name number with sign bit indicating closed]
380 398 _rbcrecfmt = '>4sI'
381 399 _rbcrecsize = calcsize(_rbcrecfmt)
382 400 _rbcnodelen = 4
383 401 _rbcbranchidxmask = 0x7fffffff
384 402 _rbccloseflag = 0x80000000
385 403
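A minimal standalone sketch of how one rbc-revs record round-trips through these constants (hypothetical node prefix and branch index):

    import struct

    _rbcrecfmt = '>4sI'
    _rbcbranchidxmask = 0x7fffffff
    _rbccloseflag = 0x80000000

    # pack: first 4 bytes of the node hash, branch index with the close bit set
    record = struct.pack(_rbcrecfmt, b'\xde\xad\xbe\xef', 5 | _rbccloseflag)

    # unpack, then split the close flag back out of the branch index
    node4, branchidx = struct.unpack(_rbcrecfmt, record)
    close = bool(branchidx & _rbccloseflag)
    branchidx &= _rbcbranchidxmask
    print(node4, branchidx, close)   # b'\xde\xad\xbe\xef' 5 True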
386 404 class revbranchcache(object):
387 405 """Persistent cache, mapping from revision number to branch name and close.
388 406 This is a low level cache, independent of filtering.
389 407
390 408 Branch names are stored in rbc-names in internal encoding separated by 0.
391 409 rbc-names is append-only, and each branch name is only stored once and will
392 410 thus have a unique index.
393 411
394 412 The branch info for each revision is stored in rbc-revs as constant size
395 413 records. The whole file is read into memory, but it is only 'parsed' on
396 414 demand. The file is usually append-only but will be truncated if repo
397 415 modification is detected.
398 416 The record for each revision contains the first 4 bytes of the
399 417 corresponding node hash, and the record is only used if it still matches.
400 418 Even a completely trashed rbc-revs file will thus still give the right result
401 419 while converging towards full recovery ... assuming no incorrectly matching
402 420 node hashes.
403 421 The record also contains 4 bytes where 31 bits contain the index of the
404 422 branch and the last bit indicates that it is a branch-close commit.
405 423 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
406 424 and will grow with it but be 1/8th of its size.
407 425 """
408 426
409 427 def __init__(self, repo, readonly=True):
410 428 assert repo.filtername is None
411 429 self._repo = repo
412 430 self._names = [] # branch names in local encoding with static index
413 431 self._rbcrevs = bytearray()
414 432 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
415 433 try:
416 434 bndata = repo.cachevfs.read(_rbcnames)
417 435 self._rbcsnameslen = len(bndata) # for verification before writing
418 436 if bndata:
419 437 self._names = [encoding.tolocal(bn)
420 438 for bn in bndata.split('\0')]
421 439 except (IOError, OSError):
422 440 if readonly:
423 441 # don't try to use cache - fall back to the slow path
424 442 self.branchinfo = self._branchinfo
425 443
426 444 if self._names:
427 445 try:
428 446 data = repo.cachevfs.read(_rbcrevs)
429 447 self._rbcrevs[:] = data
430 448 except (IOError, OSError) as inst:
431 449 repo.ui.debug("couldn't read revision branch cache: %s\n" %
432 450 stringutil.forcebytestr(inst))
433 451 # remember number of good records on disk
434 452 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
435 453 len(repo.changelog))
436 454 if self._rbcrevslen == 0:
437 455 self._names = []
438 456 self._rbcnamescount = len(self._names) # number of names read at
439 457 # _rbcsnameslen
440 458
441 459 def _clear(self):
442 460 self._rbcsnameslen = 0
443 461 del self._names[:]
444 462 self._rbcnamescount = 0
445 463 self._rbcrevslen = len(self._repo.changelog)
446 464 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
447 465 util.clearcachedproperty(self, '_namesreverse')
448 466
449 467 @util.propertycache
450 468 def _namesreverse(self):
451 469 return dict((b, r) for r, b in enumerate(self._names))
452 470
453 471 def branchinfo(self, rev):
454 472 """Return branch name and close flag for rev, using and updating
455 473 persistent cache."""
456 474 changelog = self._repo.changelog
457 475 rbcrevidx = rev * _rbcrecsize
458 476
459 477 # avoid negative index, changelog.read(nullrev) is fast without cache
460 478 if rev == nullrev:
461 479 return changelog.branchinfo(rev)
462 480
463 481 # if requested rev isn't allocated, grow and cache the rev info
464 482 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
465 483 return self._branchinfo(rev)
466 484
467 485 # fast path: extract data from cache, use it if node is matching
468 486 reponode = changelog.node(rev)[:_rbcnodelen]
469 487 cachenode, branchidx = unpack_from(
470 488 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
471 489 close = bool(branchidx & _rbccloseflag)
472 490 if close:
473 491 branchidx &= _rbcbranchidxmask
474 492 if cachenode == '\0\0\0\0':
475 493 pass
476 494 elif cachenode == reponode:
477 495 try:
478 496 return self._names[branchidx], close
479 497 except IndexError:
480 498 # recover from invalid reference to unknown branch
481 499 self._repo.ui.debug("referenced branch names not found"
482 500 " - rebuilding revision branch cache from scratch\n")
483 501 self._clear()
484 502 else:
485 503 # rev/node map has changed, invalidate the cache from here up
486 504 self._repo.ui.debug("history modification detected - truncating "
487 505 "revision branch cache to revision %d\n" % rev)
488 506 truncate = rbcrevidx + _rbcrecsize
489 507 del self._rbcrevs[truncate:]
490 508 self._rbcrevslen = min(self._rbcrevslen, truncate)
491 509
492 510 # fall back to slow path and make sure it will be written to disk
493 511 return self._branchinfo(rev)
494 512
495 513 def _branchinfo(self, rev):
496 514 """Retrieve branch info from changelog and update _rbcrevs"""
497 515 changelog = self._repo.changelog
498 516 b, close = changelog.branchinfo(rev)
499 517 if b in self._namesreverse:
500 518 branchidx = self._namesreverse[b]
501 519 else:
502 520 branchidx = len(self._names)
503 521 self._names.append(b)
504 522 self._namesreverse[b] = branchidx
505 523 reponode = changelog.node(rev)
506 524 if close:
507 525 branchidx |= _rbccloseflag
508 526 self._setcachedata(rev, reponode, branchidx)
509 527 return b, close
510 528
511 529 def setdata(self, branch, rev, node, close):
512 530 """add new data information to the cache"""
513 531 if branch in self._namesreverse:
514 532 branchidx = self._namesreverse[branch]
515 533 else:
516 534 branchidx = len(self._names)
517 535 self._names.append(branch)
518 536 self._namesreverse[branch] = branchidx
519 537 if close:
520 538 branchidx |= _rbccloseflag
521 539 self._setcachedata(rev, node, branchidx)
522 540 # If no cache data were readable (none exists, bad permissions, etc.),
523 541 # the cache was bypassing itself by setting:
524 542 #
525 543 # self.branchinfo = self._branchinfo
526 544 #
527 545 # Since we now have data in the cache, we need to drop this bypassing.
528 546 if r'branchinfo' in vars(self):
529 547 del self.branchinfo
530 548
531 549 def _setcachedata(self, rev, node, branchidx):
532 550 """Writes the node's branch data to the in-memory cache data."""
533 551 if rev == nullrev:
534 552 return
535 553 rbcrevidx = rev * _rbcrecsize
536 554 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
537 555 self._rbcrevs.extend('\0' *
538 556 (len(self._repo.changelog) * _rbcrecsize -
539 557 len(self._rbcrevs)))
540 558 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
541 559 self._rbcrevslen = min(self._rbcrevslen, rev)
542 560
543 561 tr = self._repo.currenttransaction()
544 562 if tr:
545 563 tr.addfinalize('write-revbranchcache', self.write)
546 564
547 565 def write(self, tr=None):
548 566 """Save branch cache if it is dirty."""
549 567 repo = self._repo
550 568 wlock = None
551 569 step = ''
552 570 try:
553 571 if self._rbcnamescount < len(self._names):
554 572 step = ' names'
555 573 wlock = repo.wlock(wait=False)
556 574 if self._rbcnamescount != 0:
557 575 f = repo.cachevfs.open(_rbcnames, 'ab')
558 576 if f.tell() == self._rbcsnameslen:
559 577 f.write('\0')
560 578 else:
561 579 f.close()
562 580 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
563 581 self._rbcnamescount = 0
564 582 self._rbcrevslen = 0
565 583 if self._rbcnamescount == 0:
566 584 # before rewriting names, make sure references are removed
567 585 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
568 586 f = repo.cachevfs.open(_rbcnames, 'wb')
569 587 f.write('\0'.join(encoding.fromlocal(b)
570 588 for b in self._names[self._rbcnamescount:]))
571 589 self._rbcsnameslen = f.tell()
572 590 f.close()
573 591 self._rbcnamescount = len(self._names)
574 592
575 593 start = self._rbcrevslen * _rbcrecsize
576 594 if start != len(self._rbcrevs):
577 595 step = ''
578 596 if wlock is None:
579 597 wlock = repo.wlock(wait=False)
580 598 revs = min(len(repo.changelog),
581 599 len(self._rbcrevs) // _rbcrecsize)
582 600 f = repo.cachevfs.open(_rbcrevs, 'ab')
583 601 if f.tell() != start:
584 602 repo.ui.debug("truncating cache/%s to %d\n"
585 603 % (_rbcrevs, start))
586 604 f.seek(start)
587 605 if f.tell() != start:
588 606 start = 0
589 607 f.seek(start)
590 608 f.truncate()
591 609 end = revs * _rbcrecsize
592 610 f.write(self._rbcrevs[start:end])
593 611 f.close()
594 612 self._rbcrevslen = revs
595 613 except (IOError, OSError, error.Abort, error.LockError) as inst:
596 614 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
597 615 % (step, stringutil.forcebytestr(inst)))
598 616 finally:
599 617 if wlock is not None:
600 618 wlock.release()
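To make the head-pruning step in branchcache.update() above concrete, here is a minimal standalone sketch on a toy DAG (a hypothetical parents table stands in for the changelog; uncertain and the floor revision mirror the names in the diff):

    # toy DAG: rev -> parent revs; rev 1 was an old head, revs 2 and 3 are new
    parents = {0: [], 1: [0], 2: [1], 3: [1]}

    def ancestors(revs, floor):
        # all ancestors of revs whose number is >= floor (revs themselves excluded)
        seen, stack = set(), list(revs)
        while stack:
            for p in parents[stack.pop()]:
                if p >= floor and p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    bheadset = {1, 2, 3}                # old head plus newly added revs
    topoheads = {2, 3}                  # current topological heads
    uncertain = bheadset - topoheads    # {1}: only non-topoheads need checking
    if uncertain:
        bheadset -= ancestors([2, 3], min(uncertain))
    print(sorted(bheadset))             # [2, 3] -- rev 1 is pruned as an ancestor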
@@ -1,3092 +1,3092
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 95 unfi = repo.unfiltered()
96 96 try:
97 97 return unfi.__dict__[self.sname]
98 98 except KeyError:
99 99 pass
100 100 return super(_basefilecache, self).__get__(unfi, type)
101 101
102 102 def set(self, repo, value):
103 103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104 104
105 105 class repofilecache(_basefilecache):
106 106 """filecache for files in .hg but outside of .hg/store"""
107 107 def __init__(self, *paths):
108 108 super(repofilecache, self).__init__(*paths)
109 109 for path in paths:
110 110 _cachedfiles.add((path, 'plain'))
111 111
112 112 def join(self, obj, fname):
113 113 return obj.vfs.join(fname)
114 114
115 115 class storecache(_basefilecache):
116 116 """filecache for files in the store"""
117 117 def __init__(self, *paths):
118 118 super(storecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, ''))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.sjoin(fname)
124 124
125 125 def isfilecached(repo, name):
126 126 """check if a repo has already cached "name" filecache-ed property
127 127
128 128 This returns (cachedobj-or-None, iscached) tuple.
129 129 """
130 130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 131 if not cacheentry:
132 132 return None, False
133 133 return cacheentry.obj, True
134 134
135 135 class unfilteredpropertycache(util.propertycache):
136 136 """propertycache that apply to unfiltered repo only"""
137 137
138 138 def __get__(self, repo, type=None):
139 139 unfi = repo.unfiltered()
140 140 if unfi is repo:
141 141 return super(unfilteredpropertycache, self).__get__(unfi)
142 142 return getattr(unfi, self.name)
143 143
144 144 class filteredpropertycache(util.propertycache):
145 145 """propertycache that must take filtering in account"""
146 146
147 147 def cachevalue(self, obj, value):
148 148 object.__setattr__(obj, self.name, value)
149 149
150 150
151 151 def hasunfilteredcache(repo, name):
152 152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 153 return name in vars(repo.unfiltered())
154 154
155 155 def unfilteredmethod(orig):
156 156 """decorate method that always need to be run on unfiltered version"""
157 157 def wrapper(repo, *args, **kwargs):
158 158 return orig(repo.unfiltered(), *args, **kwargs)
159 159 return wrapper
160 160
161 161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 162 'unbundle'}
163 163 legacycaps = moderncaps.union({'changegroupsubset'})
164 164
165 165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 166 class localcommandexecutor(object):
167 167 def __init__(self, peer):
168 168 self._peer = peer
169 169 self._sent = False
170 170 self._closed = False
171 171
172 172 def __enter__(self):
173 173 return self
174 174
175 175 def __exit__(self, exctype, excvalue, exctb):
176 176 self.close()
177 177
178 178 def callcommand(self, command, args):
179 179 if self._sent:
180 180 raise error.ProgrammingError('callcommand() cannot be used after '
181 181 'sendcommands()')
182 182
183 183 if self._closed:
184 184 raise error.ProgrammingError('callcommand() cannot be used after '
185 185 'close()')
186 186
187 187 # We don't need to support anything fancy. Just call the named
188 188 # method on the peer and return a resolved future.
189 189 fn = getattr(self._peer, pycompat.sysstr(command))
190 190
191 191 f = pycompat.futures.Future()
192 192
193 193 try:
194 194 result = fn(**pycompat.strkwargs(args))
195 195 except Exception:
196 196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 197 else:
198 198 f.set_result(result)
199 199
200 200 return f
201 201
202 202 def sendcommands(self):
203 203 self._sent = True
204 204
205 205 def close(self):
206 206 self._closed = True
207 207
208 208 @interfaceutil.implementer(repository.ipeercommands)
209 209 class localpeer(repository.peer):
210 210 '''peer for a local repo; reflects only the most recent API'''
211 211
212 212 def __init__(self, repo, caps=None):
213 213 super(localpeer, self).__init__()
214 214
215 215 if caps is None:
216 216 caps = moderncaps.copy()
217 217 self._repo = repo.filtered('served')
218 218 self.ui = repo.ui
219 219 self._caps = repo._restrictcapabilities(caps)
220 220
221 221 # Begin of _basepeer interface.
222 222
223 223 def url(self):
224 224 return self._repo.url()
225 225
226 226 def local(self):
227 227 return self._repo
228 228
229 229 def peer(self):
230 230 return self
231 231
232 232 def canpush(self):
233 233 return True
234 234
235 235 def close(self):
236 236 self._repo.close()
237 237
238 238 # End of _basepeer interface.
239 239
240 240 # Begin of _basewirecommands interface.
241 241
242 242 def branchmap(self):
243 243 return self._repo.branchmap()
244 244
245 245 def capabilities(self):
246 246 return self._caps
247 247
248 248 def clonebundles(self):
249 249 return self._repo.tryread('clonebundles.manifest')
250 250
251 251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 252 """Used to test argument passing over the wire"""
253 253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 254 pycompat.bytestr(four),
255 255 pycompat.bytestr(five))
256 256
257 257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 258 **kwargs):
259 259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 260 common=common, bundlecaps=bundlecaps,
261 261 **kwargs)[1]
262 262 cb = util.chunkbuffer(chunks)
263 263
264 264 if exchange.bundle2requested(bundlecaps):
265 265 # When requesting a bundle2, getbundle returns a stream to make the
266 266 # wire level function happier. We need to build a proper object
267 267 # from it in local peer.
268 268 return bundle2.getunbundler(self.ui, cb)
269 269 else:
270 270 return changegroup.getunbundler('01', cb, None)
271 271
272 272 def heads(self):
273 273 return self._repo.heads()
274 274
275 275 def known(self, nodes):
276 276 return self._repo.known(nodes)
277 277
278 278 def listkeys(self, namespace):
279 279 return self._repo.listkeys(namespace)
280 280
281 281 def lookup(self, key):
282 282 return self._repo.lookup(key)
283 283
284 284 def pushkey(self, namespace, key, old, new):
285 285 return self._repo.pushkey(namespace, key, old, new)
286 286
287 287 def stream_out(self):
288 288 raise error.Abort(_('cannot perform stream clone against local '
289 289 'peer'))
290 290
291 291 def unbundle(self, bundle, heads, url):
292 292 """apply a bundle on a repo
293 293
294 294 This function handles the repo locking itself."""
295 295 try:
296 296 try:
297 297 bundle = exchange.readbundle(self.ui, bundle, None)
298 298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 299 if util.safehasattr(ret, 'getchunks'):
300 300 # This is a bundle20 object, turn it into an unbundler.
301 301 # This little dance should be dropped eventually when the
302 302 # API is finally improved.
303 303 stream = util.chunkbuffer(ret.getchunks())
304 304 ret = bundle2.getunbundler(self.ui, stream)
305 305 return ret
306 306 except Exception as exc:
307 307 # If the exception contains output salvaged from a bundle2
308 308 # reply, we need to make sure it is printed before continuing
309 309 # to fail. So we build a bundle2 with such output and consume
310 310 # it directly.
311 311 #
312 312 # This is not very elegant but allows a "simple" solution for
313 313 # issue4594
314 314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 315 if output:
316 316 bundler = bundle2.bundle20(self._repo.ui)
317 317 for out in output:
318 318 bundler.addpart(out)
319 319 stream = util.chunkbuffer(bundler.getchunks())
320 320 b = bundle2.getunbundler(self.ui, stream)
321 321 bundle2.processbundle(self._repo, b)
322 322 raise
323 323 except error.PushRaced as exc:
324 324 raise error.ResponseError(_('push failed:'),
325 325 stringutil.forcebytestr(exc))
326 326
327 327 # End of _basewirecommands interface.
328 328
329 329 # Begin of peer interface.
330 330
331 331 def commandexecutor(self):
332 332 return localcommandexecutor(self)
333 333
334 334 # End of peer interface.
335 335
336 336 @interfaceutil.implementer(repository.ipeerlegacycommands)
337 337 class locallegacypeer(localpeer):
338 338 '''peer extension which implements legacy methods too; used for tests with
339 339 restricted capabilities'''
340 340
341 341 def __init__(self, repo):
342 342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
343 343
344 344 # Begin of baselegacywirecommands interface.
345 345
346 346 def between(self, pairs):
347 347 return self._repo.between(pairs)
348 348
349 349 def branches(self, nodes):
350 350 return self._repo.branches(nodes)
351 351
352 352 def changegroup(self, nodes, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
354 354 missingheads=self._repo.heads())
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 def changegroupsubset(self, bases, heads, source):
358 358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
359 359 missingheads=heads)
360 360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
361 361
362 362 # End of baselegacywirecommands interface.
363 363
364 364 # Increment the sub-version when the revlog v2 format changes to lock out old
365 365 # clients.
366 366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
367 367
368 368 # A repository with the sparserevlog feature will have delta chains that
369 369 # can spread over a larger span. Sparse reading cuts these large spans into
370 370 # pieces, so that each piece isn't too big.
371 371 # Without the sparserevlog capability, reading from the repository could use
372 372 # huge amounts of memory, because the whole span would be read at once,
373 373 # including all the intermediate revisions that aren't pertinent for the chain.
374 374 # This is why once a repository has enabled sparse-read, it becomes required.
375 375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
376 376
377 377 # Functions receiving (ui, features) that extensions can register to impact
378 378 # the ability to load repositories with custom requirements. Only
379 379 # functions defined in loaded extensions are called.
380 380 #
381 381 # The function receives a set of requirement strings that the repository
382 382 # is capable of opening. Functions will typically add elements to the
383 383 # set to reflect that the extension knows how to handle those requirements.
384 384 featuresetupfuncs = set()
385 385
386 386 def makelocalrepository(baseui, path, intents=None):
387 387 """Create a local repository object.
388 388
389 389 Given arguments needed to construct a local repository, this function
390 390 performs various early repository loading functionality (such as
391 391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
392 392 the repository can be opened, derives a type suitable for representing
393 393 that repository, and returns an instance of it.
394 394
395 395 The returned object conforms to the ``repository.completelocalrepository``
396 396 interface.
397 397
398 398 The repository type is derived by calling a series of factory functions
399 399 for each aspect/interface of the final repository. These are defined by
400 400 ``REPO_INTERFACES``.
401 401
402 402 Each factory function is called to produce a type implementing a specific
403 403 interface. The cumulative list of returned types will be combined into a
404 404 new type and that type will be instantiated to represent the local
405 405 repository.
406 406
407 407 The factory functions each receive various state that may be consulted
408 408 as part of deriving a type.
409 409
410 410 Extensions should wrap these factory functions to customize repository type
411 411 creation. Note that an extension's wrapped function may be called even if
412 412 that extension is not loaded for the repo being constructed. Extensions
413 413 should check if their ``__name__`` appears in the
414 414 ``extensionmodulenames`` set passed to the factory function and no-op if
415 415 not.
416 416 """
417 417 ui = baseui.copy()
418 418 # Prevent copying repo configuration.
419 419 ui.copy = baseui.copy
420 420
421 421 # Working directory VFS rooted at repository root.
422 422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
423 423
424 424 # Main VFS for .hg/ directory.
425 425 hgpath = wdirvfs.join(b'.hg')
426 426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
427 427
428 428 # The .hg/ path should exist and should be a directory. All other
429 429 # cases are errors.
430 430 if not hgvfs.isdir():
431 431 try:
432 432 hgvfs.stat()
433 433 except OSError as e:
434 434 if e.errno != errno.ENOENT:
435 435 raise
436 436
437 437 raise error.RepoError(_(b'repository %s not found') % path)
438 438
439 439 # .hg/requires file contains a newline-delimited list of
440 440 # features/capabilities the opener (us) must have in order to use
441 441 # the repository. This file was introduced in Mercurial 0.9.2,
442 442 # which means very old repositories may not have one. We assume
443 443 # a missing file translates to no requirements.
444 444 try:
445 445 requirements = set(hgvfs.read(b'requires').splitlines())
446 446 except IOError as e:
447 447 if e.errno != errno.ENOENT:
448 448 raise
449 449 requirements = set()
450 450
451 451 # The .hg/hgrc file may load extensions or contain config options
452 452 # that influence repository construction. Attempt to load it and
453 453 # process any new extensions that it may have pulled in.
454 454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
455 455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
456 456 extensions.loadall(ui)
457 457 extensions.populateui(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we
480 480 # will not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511 wcachepath = hgvfs.join(b'wcache')
512 512
513 513
514 514 # The store has changed over time and the exact layout is dictated by
515 515 # requirements. The store interface abstracts differences across all
516 516 # of them.
517 517 store = makestore(requirements, storebasepath,
518 518 lambda base: vfsmod.vfs(base, cacheaudited=True))
519 519 hgvfs.createmode = store.createmode
520 520
521 521 storevfs = store.vfs
522 522 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
523 523
524 524 # The cache vfs is used to manage cache files.
525 525 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
526 526 cachevfs.createmode = store.createmode
527 527 # The cache vfs is used to manage cache files related to the working copy
528 528 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
529 529 wcachevfs.createmode = store.createmode
530 530
531 531 # Now resolve the type for the repository object. We do this by repeatedly
532 532 # calling a factory function to produce types for specific aspects of the
533 533 # repo's operation. The aggregate returned types are used as base classes
534 534 # for a dynamically-derived type, which will represent our new repository.
535 535
536 536 bases = []
537 537 extrastate = {}
538 538
539 539 for iface, fn in REPO_INTERFACES:
540 540 # We pass all potentially useful state to give extensions tons of
541 541 # flexibility.
542 542 typ = fn()(ui=ui,
543 543 intents=intents,
544 544 requirements=requirements,
545 545 features=features,
546 546 wdirvfs=wdirvfs,
547 547 hgvfs=hgvfs,
548 548 store=store,
549 549 storevfs=storevfs,
550 550 storeoptions=storevfs.options,
551 551 cachevfs=cachevfs,
552 552 wcachevfs=wcachevfs,
553 553 extensionmodulenames=extensionmodulenames,
554 554 extrastate=extrastate,
555 555 baseclasses=bases)
556 556
557 557 if not isinstance(typ, type):
558 558 raise error.ProgrammingError('unable to construct type for %s' %
559 559 iface)
560 560
561 561 bases.append(typ)
562 562
563 563 # type() allows you to use characters in type names that wouldn't be
564 564 # recognized as Python symbols in source code. We abuse that to add
565 565 # rich information about our constructed repo.
566 566 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
567 567 wdirvfs.base,
568 568 b','.join(sorted(requirements))))
569 569
570 570 cls = type(name, tuple(bases), {})
571 571
572 572 return cls(
573 573 baseui=baseui,
574 574 ui=ui,
575 575 origroot=path,
576 576 wdirvfs=wdirvfs,
577 577 hgvfs=hgvfs,
578 578 requirements=requirements,
579 579 supportedrequirements=supportedrequirements,
580 580 sharedpath=storebasepath,
581 581 store=store,
582 582 cachevfs=cachevfs,
583 583 wcachevfs=wcachevfs,
584 584 features=features,
585 585 intents=intents)
586 586
587 587 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
588 588 """Load hgrc files/content into a ui instance.
589 589
590 590 This is called during repository opening to load any additional
591 591 config files or settings relevant to the current repository.
592 592
593 593 Returns a bool indicating whether any additional configs were loaded.
594 594
595 595 Extensions should monkeypatch this function to modify how per-repo
596 596 configs are loaded. For example, an extension may wish to pull in
597 597 configs from alternate files or sources.
598 598 """
599 599 try:
600 600 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
601 601 return True
602 602 except IOError:
603 603 return False
604 604
605 605 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
606 606 """Perform additional actions after .hg/hgrc is loaded.
607 607
608 608 This function is called during repository loading immediately after
609 609 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
610 610
611 611 The function can be used to validate configs, automatically add
612 612 options (including extensions) based on requirements, etc.
613 613 """
614 614
615 615 # Map of requirements to list of extensions to load automatically when
616 616 # requirement is present.
617 617 autoextensions = {
618 618 b'largefiles': [b'largefiles'],
619 619 b'lfs': [b'lfs'],
620 620 }
621 621
622 622 for requirement, names in sorted(autoextensions.items()):
623 623 if requirement not in requirements:
624 624 continue
625 625
626 626 for name in names:
627 627 if not ui.hasconfig(b'extensions', name):
628 628 ui.setconfig(b'extensions', name, b'', source='autoload')
629 629
630 630 def gathersupportedrequirements(ui):
631 631 """Determine the complete set of recognized requirements."""
632 632 # Start with all requirements supported by this file.
633 633 supported = set(localrepository._basesupported)
634 634
635 635 # Execute ``featuresetupfuncs`` entries if they belong to an extension
636 636 # relevant to this ui instance.
637 637 modules = {m.__name__ for n, m in extensions.extensions(ui)}
638 638
639 639 for fn in featuresetupfuncs:
640 640 if fn.__module__ in modules:
641 641 fn(ui, supported)
642 642
643 643 # Add derived requirements from registered compression engines.
644 644 for name in util.compengines:
645 645 engine = util.compengines[name]
646 646 if engine.revlogheader():
647 647 supported.add(b'exp-compression-%s' % name)
648 648
649 649 return supported
650 650
651 651 def ensurerequirementsrecognized(requirements, supported):
652 652 """Validate that a set of local requirements is recognized.
653 653
654 654 Receives a set of requirements. Raises an ``error.RepoError`` if there
655 655 exists any requirement in that set that currently loaded code doesn't
656 656 recognize.
657 657
658 658 Returns a set of supported requirements.
659 659 """
660 660 missing = set()
661 661
662 662 for requirement in requirements:
663 663 if requirement in supported:
664 664 continue
665 665
666 666 if not requirement or not requirement[0:1].isalnum():
667 667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
668 668
669 669 missing.add(requirement)
670 670
671 671 if missing:
672 672 raise error.RequirementError(
673 673 _(b'repository requires features unknown to this Mercurial: %s') %
674 674 b' '.join(sorted(missing)),
675 675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
676 676 b'for more information'))
677 677
678 678 def ensurerequirementscompatible(ui, requirements):
679 679 """Validates that a set of recognized requirements is mutually compatible.
680 680
681 681 Some requirements may not be compatible with others or require
682 682 config options that aren't enabled. This function is called during
683 683 repository opening to ensure that the set of requirements needed
684 684 to open a repository is sane and compatible with config options.
685 685
686 686 Extensions can monkeypatch this function to perform additional
687 687 checking.
688 688
689 689 ``error.RepoError`` should be raised on failure.
690 690 """
691 691 if b'exp-sparse' in requirements and not sparse.enabled:
692 692 raise error.RepoError(_(b'repository is using sparse feature but '
693 693 b'sparse is not enabled; enable the '
694 694 b'"sparse" extensions to access'))
695 695
696 696 def makestore(requirements, path, vfstype):
697 697 """Construct a storage object for a repository."""
698 698 if b'store' in requirements:
699 699 if b'fncache' in requirements:
700 700 return storemod.fncachestore(path, vfstype,
701 701 b'dotencode' in requirements)
702 702
703 703 return storemod.encodedstore(path, vfstype)
704 704
705 705 return storemod.basicstore(path, vfstype)
706 706
707 707 def resolvestorevfsoptions(ui, requirements, features):
708 708 """Resolve the options to pass to the store vfs opener.
709 709
710 710 The returned dict is used to influence behavior of the storage layer.
711 711 """
712 712 options = {}
713 713
714 714 if b'treemanifest' in requirements:
715 715 options[b'treemanifest'] = True
716 716
717 717 # experimental config: format.manifestcachesize
718 718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
719 719 if manifestcachesize is not None:
720 720 options[b'manifestcachesize'] = manifestcachesize
721 721
722 722 # In the absence of another requirement superseding a revlog-related
723 723 # requirement, we have to assume the repo is using revlog version 0.
724 724 # This revlog format is super old and we don't bother trying to parse
725 725 # opener options for it because those options wouldn't do anything
726 726 # meaningful on such old repos.
727 727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
728 728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
729 729
730 730 return options
731 731
732 732 def resolverevlogstorevfsoptions(ui, requirements, features):
733 733 """Resolve opener options specific to revlogs."""
734 734
735 735 options = {}
736 736 options[b'flagprocessors'] = {}
737 737
738 738 if b'revlogv1' in requirements:
739 739 options[b'revlogv1'] = True
740 740 if REVLOGV2_REQUIREMENT in requirements:
741 741 options[b'revlogv2'] = True
742 742
743 743 if b'generaldelta' in requirements:
744 744 options[b'generaldelta'] = True
745 745
746 746 # experimental config: format.chunkcachesize
747 747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
748 748 if chunkcachesize is not None:
749 749 options[b'chunkcachesize'] = chunkcachesize
750 750
751 751 deltabothparents = ui.configbool(b'storage',
752 752 b'revlog.optimize-delta-parent-choice')
753 753 options[b'deltabothparents'] = deltabothparents
754 754
755 755 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
756 756 lazydeltabase = False
757 757 if lazydelta:
758 758 lazydeltabase = ui.configbool(b'storage',
759 759 b'revlog.reuse-external-delta-parent')
760 760 if lazydeltabase is None:
761 761 lazydeltabase = not scmutil.gddeltaconfig(ui)
762 762 options[b'lazydelta'] = lazydelta
763 763 options[b'lazydeltabase'] = lazydeltabase
764 764
765 765 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
766 766 if 0 <= chainspan:
767 767 options[b'maxdeltachainspan'] = chainspan
768 768
769 769 mmapindexthreshold = ui.configbytes(b'experimental',
770 770 b'mmapindexthreshold')
771 771 if mmapindexthreshold is not None:
772 772 options[b'mmapindexthreshold'] = mmapindexthreshold
773 773
774 774 withsparseread = ui.configbool(b'experimental', b'sparse-read')
775 775 srdensitythres = float(ui.config(b'experimental',
776 776 b'sparse-read.density-threshold'))
777 777 srmingapsize = ui.configbytes(b'experimental',
778 778 b'sparse-read.min-gap-size')
779 779 options[b'with-sparse-read'] = withsparseread
780 780 options[b'sparse-read-density-threshold'] = srdensitythres
781 781 options[b'sparse-read-min-gap-size'] = srmingapsize
782 782
783 783 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
784 784 options[b'sparse-revlog'] = sparserevlog
785 785 if sparserevlog:
786 786 options[b'generaldelta'] = True
787 787
788 788 maxchainlen = None
789 789 if sparserevlog:
790 790 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
791 791 # experimental config: format.maxchainlen
792 792 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
793 793 if maxchainlen is not None:
794 794 options[b'maxchainlen'] = maxchainlen
795 795
796 796 for r in requirements:
797 797 if r.startswith(b'exp-compression-'):
798 798 options[b'compengine'] = r[len(b'exp-compression-'):]
799 799
800 800 if repository.NARROW_REQUIREMENT in requirements:
801 801 options[b'enableellipsis'] = True
802 802
803 803 return options
804 804
805 805 def makemain(**kwargs):
806 806 """Produce a type conforming to ``ilocalrepositorymain``."""
807 807 return localrepository
808 808
809 809 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
810 810 class revlogfilestorage(object):
811 811 """File storage when using revlogs."""
812 812
813 813 def file(self, path):
814 814 if path[0] == b'/':
815 815 path = path[1:]
816 816
817 817 return filelog.filelog(self.svfs, path)
818 818
819 819 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
820 820 class revlognarrowfilestorage(object):
821 821 """File storage when using revlogs and narrow files."""
822 822
823 823 def file(self, path):
824 824 if path[0] == b'/':
825 825 path = path[1:]
826 826
827 827 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
828 828
829 829 def makefilestorage(requirements, features, **kwargs):
830 830 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
831 831 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
832 832 features.add(repository.REPO_FEATURE_STREAM_CLONE)
833 833
834 834 if repository.NARROW_REQUIREMENT in requirements:
835 835 return revlognarrowfilestorage
836 836 else:
837 837 return revlogfilestorage
838 838
839 839 # List of repository interfaces and factory functions for them. Each
840 840 # will be called in order during ``makelocalrepository()`` to iteratively
841 841 # derive the final type for a local repository instance. We capture the
842 842 # function as a lambda so we don't hold a reference and the module-level
843 843 # functions can be wrapped.
844 844 REPO_INTERFACES = [
845 845 (repository.ilocalrepositorymain, lambda: makemain),
846 846 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
847 847 ]
848 848
849 849 @interfaceutil.implementer(repository.ilocalrepositorymain)
850 850 class localrepository(object):
851 851 """Main class for representing local repositories.
852 852
853 853 All local repositories are instances of this class.
854 854
855 855 Constructed on its own, instances of this class are not usable as
856 856 repository objects. To obtain a usable repository object, call
857 857 ``hg.repository()``, ``localrepo.instance()``, or
858 858 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
859 859 ``instance()`` adds support for creating new repositories.
860 860 ``hg.repository()`` adds more extension integration, including calling
861 861 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
862 862 used.
863 863 """
864 864
865 865 # obsolete experimental requirements:
866 866 # - manifestv2: An experimental new manifest format that allowed
867 867 # for stem compression of long paths. Experiment ended up not
868 868 # being successful (repository sizes went up due to worse delta
869 869 # chains), and the code was deleted in 4.6.
870 870 supportedformats = {
871 871 'revlogv1',
872 872 'generaldelta',
873 873 'treemanifest',
874 874 REVLOGV2_REQUIREMENT,
875 875 SPARSEREVLOG_REQUIREMENT,
876 876 }
877 877 _basesupported = supportedformats | {
878 878 'store',
879 879 'fncache',
880 880 'shared',
881 881 'relshared',
882 882 'dotencode',
883 883 'exp-sparse',
884 884 'internal-phase'
885 885 }
886 886
887 887 # list of prefix for file which can be written without 'wlock'
888 888 # Extensions should extend this list when needed
889 889 _wlockfreeprefix = {
890 890 # We might consider requiring 'wlock' for the next
891 891 # two, but pretty much all the existing code assume
892 892 # wlock is not needed so we keep them excluded for
893 893 # now.
894 894 'hgrc',
895 895 'requires',
896 896 # XXX cache is a complicated business; someone
897 897 # should investigate this in depth at some point
898 898 'cache/',
899 899 # XXX shouldn't dirstate be covered by the wlock?
900 900 'dirstate',
901 901 # XXX bisect was still a bit too messy at the time
902 902 # this changeset was introduced. Someone should fix
903 903 # the remaining bit and drop this line
904 904 'bisect.state',
905 905 }
906 906
907 907 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
908 908 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
909 909 features, intents=None):
910 910 """Create a new local repository instance.
911 911
912 912 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
913 913 or ``localrepo.makelocalrepository()`` for obtaining a new repository
914 914 object.
915 915
916 916 Arguments:
917 917
918 918 baseui
919 919 ``ui.ui`` instance that ``ui`` argument was based off of.
920 920
921 921 ui
922 922 ``ui.ui`` instance for use by the repository.
923 923
924 924 origroot
925 925 ``bytes`` path to working directory root of this repository.
926 926
927 927 wdirvfs
928 928 ``vfs.vfs`` rooted at the working directory.
929 929
930 930 hgvfs
931 931 ``vfs.vfs`` rooted at .hg/
932 932
933 933 requirements
934 934 ``set`` of bytestrings representing repository opening requirements.
935 935
936 936 supportedrequirements
937 937 ``set`` of bytestrings representing repository requirements that we
938 938 know how to open. May be a supetset of ``requirements``.
939 939
940 940 sharedpath
941 941 ``bytes`` defining the path to the storage base directory. Points to a
942 942 ``.hg/`` directory somewhere.
943 943
944 944 store
945 945 ``store.basicstore`` (or derived) instance providing access to
946 946 versioned storage.
947 947
948 948 cachevfs
949 949 ``vfs.vfs`` used for cache files.
950 950
951 951 wcachevfs
952 952 ``vfs.vfs`` used for cache files related to the working copy.
953 953
954 954 features
955 955 ``set`` of bytestrings defining features/capabilities of this
956 956 instance.
957 957
958 958 intents
959 959 ``set`` of system strings indicating what this repo will be used
960 960 for.
961 961 """
962 962 self.baseui = baseui
963 963 self.ui = ui
964 964 self.origroot = origroot
965 965 # vfs rooted at working directory.
966 966 self.wvfs = wdirvfs
967 967 self.root = wdirvfs.base
968 968 # vfs rooted at .hg/. Used to access most non-store paths.
969 969 self.vfs = hgvfs
970 970 self.path = hgvfs.base
971 971 self.requirements = requirements
972 972 self.supported = supportedrequirements
973 973 self.sharedpath = sharedpath
974 974 self.store = store
975 975 self.cachevfs = cachevfs
976 976 self.wcachevfs = wcachevfs
977 977 self.features = features
978 978
979 979 self.filtername = None
980 980
981 981 if (self.ui.configbool('devel', 'all-warnings') or
982 982 self.ui.configbool('devel', 'check-locks')):
983 983 self.vfs.audit = self._getvfsward(self.vfs.audit)
984 984 # A list of callbacks to shape the phase if no data were found.
985 985 # Callbacks are in the form: func(repo, roots) --> processed root.
986 986 # This list is to be filled by extensions during repo setup
987 987 self._phasedefaults = []
988 988
989 989 color.setup(self.ui)
990 990
991 991 self.spath = self.store.path
992 992 self.svfs = self.store.vfs
993 993 self.sjoin = self.store.join
994 994 if (self.ui.configbool('devel', 'all-warnings') or
995 995 self.ui.configbool('devel', 'check-locks')):
996 996 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
997 997 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
998 998 else: # standard vfs
999 999 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1000 1000
1001 1001 self._dirstatevalidatewarned = False
1002 1002
1003 1003 self._branchcaches = branchmap.BranchMapCache()
1004 1004 self._revbranchcache = None
1005 1005 self._filterpats = {}
1006 1006 self._datafilters = {}
1007 1007 self._transref = self._lockref = self._wlockref = None
1008 1008
1009 1009 # A cache for various files under .hg/ that tracks file changes,
1010 1010 # (used by the filecache decorator)
1011 1011 #
1012 1012 # Maps a property name to its util.filecacheentry
1013 1013 self._filecache = {}
1014 1014
1015 1015 # holds sets of revisions to be filtered
1016 1016 # should be cleared when something might have changed the filter value:
1017 1017 # - new changesets,
1018 1018 # - phase change,
1019 1019 # - new obsolescence marker,
1020 1020 # - working directory parent change,
1021 1021 # - bookmark changes
1022 1022 self.filteredrevcache = {}
1023 1023
1024 1024 # post-dirstate-status hooks
1025 1025 self._postdsstatus = []
1026 1026
1027 1027 # generic mapping between names and nodes
1028 1028 self.names = namespaces.namespaces()
1029 1029
1030 1030 # Key to signature value.
1031 1031 self._sparsesignaturecache = {}
1032 1032 # Signature to cached matcher instance.
1033 1033 self._sparsematchercache = {}
1034 1034
1035 1035 def _getvfsward(self, origfunc):
1036 1036 """build a ward for self.vfs"""
1037 1037 rref = weakref.ref(self)
1038 1038 def checkvfs(path, mode=None):
1039 1039 ret = origfunc(path, mode=mode)
1040 1040 repo = rref()
1041 1041 if (repo is None
1042 1042 or not util.safehasattr(repo, '_wlockref')
1043 1043 or not util.safehasattr(repo, '_lockref')):
1044 1044 return
1045 1045 if mode in (None, 'r', 'rb'):
1046 1046 return
1047 1047 if path.startswith(repo.path):
1048 1048 # truncate name relative to the repository (.hg)
1049 1049 path = path[len(repo.path) + 1:]
1050 1050 if path.startswith('cache/'):
1051 1051 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1052 1052 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1053 1053 if path.startswith('journal.') or path.startswith('undo.'):
1054 1054 # journal is covered by 'lock'
1055 1055 if repo._currentlock(repo._lockref) is None:
1056 1056 repo.ui.develwarn('write with no lock: "%s"' % path,
1057 1057 stacklevel=3, config='check-locks')
1058 1058 elif repo._currentlock(repo._wlockref) is None:
1059 1059 # rest of vfs files are covered by 'wlock'
1060 1060 #
1061 1061 # exclude special files
1062 1062 for prefix in self._wlockfreeprefix:
1063 1063 if path.startswith(prefix):
1064 1064 return
1065 1065 repo.ui.develwarn('write with no wlock: "%s"' % path,
1066 1066 stacklevel=3, config='check-locks')
1067 1067 return ret
1068 1068 return checkvfs
1069 1069
1070 1070 def _getsvfsward(self, origfunc):
1071 1071 """build a ward for self.svfs"""
1072 1072 rref = weakref.ref(self)
1073 1073 def checksvfs(path, mode=None):
1074 1074 ret = origfunc(path, mode=mode)
1075 1075 repo = rref()
1076 1076 if repo is None or not util.safehasattr(repo, '_lockref'):
1077 1077 return
1078 1078 if mode in (None, 'r', 'rb'):
1079 1079 return
1080 1080 if path.startswith(repo.sharedpath):
1081 1081 # truncate name relative to the repository (.hg)
1082 1082 path = path[len(repo.sharedpath) + 1:]
1083 1083 if repo._currentlock(repo._lockref) is None:
1084 1084 repo.ui.develwarn('write with no lock: "%s"' % path,
1085 1085 stacklevel=4)
1086 1086 return ret
1087 1087 return checksvfs
1088 1088
1089 1089 def close(self):
1090 1090 self._writecaches()
1091 1091
1092 1092 def _writecaches(self):
1093 1093 if self._revbranchcache:
1094 1094 self._revbranchcache.write()
1095 1095
1096 1096 def _restrictcapabilities(self, caps):
1097 1097 if self.ui.configbool('experimental', 'bundle2-advertise'):
1098 1098 caps = set(caps)
1099 1099 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1100 1100 role='client'))
1101 1101 caps.add('bundle2=' + urlreq.quote(capsblob))
1102 1102 return caps
1103 1103
1104 1104 def _writerequirements(self):
1105 1105 scmutil.writerequires(self.vfs, self.requirements)
1106 1106
1107 1107 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1108 1108 # self -> auditor -> self._checknested -> self
1109 1109
1110 1110 @property
1111 1111 def auditor(self):
1112 1112 # This is only used by context.workingctx.match in order to
1113 1113 # detect files in subrepos.
1114 1114 return pathutil.pathauditor(self.root, callback=self._checknested)
1115 1115
1116 1116 @property
1117 1117 def nofsauditor(self):
1118 1118 # This is only used by context.basectx.match in order to detect
1119 1119 # files in subrepos.
1120 1120 return pathutil.pathauditor(self.root, callback=self._checknested,
1121 1121 realfs=False, cached=True)
1122 1122
1123 1123 def _checknested(self, path):
1124 1124 """Determine if path is a legal nested repository."""
1125 1125 if not path.startswith(self.root):
1126 1126 return False
1127 1127 subpath = path[len(self.root) + 1:]
1128 1128 normsubpath = util.pconvert(subpath)
1129 1129
1130 1130 # XXX: Checking against the current working copy is wrong in
1131 1131 # the sense that it can reject things like
1132 1132 #
1133 1133 # $ hg cat -r 10 sub/x.txt
1134 1134 #
1135 1135 # if sub/ is no longer a subrepository in the working copy
1136 1136 # parent revision.
1137 1137 #
1138 1138 # However, it can of course also allow things that would have
1139 1139 # been rejected before, such as the above cat command if sub/
1140 1140 # is a subrepository now, but was a normal directory before.
1141 1141 # The old path auditor would have rejected by mistake since it
1142 1142 # panics when it sees sub/.hg/.
1143 1143 #
1144 1144 # All in all, checking against the working copy seems sensible
1145 1145 # since we want to prevent access to nested repositories on
1146 1146 # the filesystem *now*.
1147 1147 ctx = self[None]
1148 1148 parts = util.splitpath(subpath)
1149 1149 while parts:
1150 1150 prefix = '/'.join(parts)
1151 1151 if prefix in ctx.substate:
1152 1152 if prefix == normsubpath:
1153 1153 return True
1154 1154 else:
1155 1155 sub = ctx.sub(prefix)
1156 1156 return sub.checknested(subpath[len(prefix) + 1:])
1157 1157 else:
1158 1158 parts.pop()
1159 1159 return False
1160 1160
1161 1161 def peer(self):
1162 1162 return localpeer(self) # not cached to avoid reference cycle
1163 1163
1164 1164 def unfiltered(self):
1165 1165 """Return unfiltered version of the repository
1166 1166
1167 1167 Intended to be overwritten by filtered repo."""
1168 1168 return self
1169 1169
1170 1170 def filtered(self, name, visibilityexceptions=None):
1171 1171 """Return a filtered version of a repository"""
1172 1172 cls = repoview.newtype(self.unfiltered().__class__)
1173 1173 return cls(self, name, visibilityexceptions)
1174 1174
1175 1175 @repofilecache('bookmarks', 'bookmarks.current')
1176 1176 def _bookmarks(self):
1177 1177 return bookmarks.bmstore(self)
1178 1178
1179 1179 @property
1180 1180 def _activebookmark(self):
1181 1181 return self._bookmarks.active
1182 1182
1183 1183 # _phasesets depend on changelog. What we need is to call
1184 1184 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1185 1185 # can't be easily expressed in the filecache mechanism.
1186 1186 @storecache('phaseroots', '00changelog.i')
1187 1187 def _phasecache(self):
1188 1188 return phases.phasecache(self, self._phasedefaults)
1189 1189
1190 1190 @storecache('obsstore')
1191 1191 def obsstore(self):
1192 1192 return obsolete.makestore(self.ui, self)
1193 1193
1194 1194 @storecache('00changelog.i')
1195 1195 def changelog(self):
1196 1196 return changelog.changelog(self.svfs,
1197 1197 trypending=txnutil.mayhavepending(self.root))
1198 1198
1199 1199 @storecache('00manifest.i')
1200 1200 def manifestlog(self):
1201 1201 rootstore = manifest.manifestrevlog(self.svfs)
1202 1202 return manifest.manifestlog(self.svfs, self, rootstore,
1203 1203 self._storenarrowmatch)
1204 1204
1205 1205 @repofilecache('dirstate')
1206 1206 def dirstate(self):
1207 1207 return self._makedirstate()
1208 1208
1209 1209 def _makedirstate(self):
1210 1210 """Extension point for wrapping the dirstate per-repo."""
1211 1211 sparsematchfn = lambda: sparse.matcher(self)
1212 1212
1213 1213 return dirstate.dirstate(self.vfs, self.ui, self.root,
1214 1214 self._dirstatevalidate, sparsematchfn)
1215 1215
1216 1216 def _dirstatevalidate(self, node):
1217 1217 try:
1218 1218 self.changelog.rev(node)
1219 1219 return node
1220 1220 except error.LookupError:
1221 1221 if not self._dirstatevalidatewarned:
1222 1222 self._dirstatevalidatewarned = True
1223 1223 self.ui.warn(_("warning: ignoring unknown"
1224 1224 " working parent %s!\n") % short(node))
1225 1225 return nullid
1226 1226
1227 1227 @storecache(narrowspec.FILENAME)
1228 1228 def narrowpats(self):
1229 1229 """matcher patterns for this repository's narrowspec
1230 1230
1231 1231 A tuple of (includes, excludes).
1232 1232 """
1233 1233 return narrowspec.load(self)
1234 1234
1235 1235 @storecache(narrowspec.FILENAME)
1236 1236 def _storenarrowmatch(self):
1237 1237 if repository.NARROW_REQUIREMENT not in self.requirements:
1238 1238 return matchmod.always()
1239 1239 include, exclude = self.narrowpats
1240 1240 return narrowspec.match(self.root, include=include, exclude=exclude)
1241 1241
1242 1242 @storecache(narrowspec.FILENAME)
1243 1243 def _narrowmatch(self):
1244 1244 if repository.NARROW_REQUIREMENT not in self.requirements:
1245 1245 return matchmod.always()
1246 1246 narrowspec.checkworkingcopynarrowspec(self)
1247 1247 include, exclude = self.narrowpats
1248 1248 return narrowspec.match(self.root, include=include, exclude=exclude)
1249 1249
1250 1250 def narrowmatch(self, match=None, includeexact=False):
1251 1251 """matcher corresponding the the repo's narrowspec
1252 1252
1253 1253 If `match` is given, then that will be intersected with the narrow
1254 1254 matcher.
1255 1255
1256 1256 If `includeexact` is True, then any exact matches from `match` will
1257 1257 be included even if they're outside the narrowspec.
1258 1258 """
1259 1259 if match:
1260 1260 if includeexact and not self._narrowmatch.always():
1261 1261 # do not exclude explicitly-specified paths so that they can
1262 1262 # be warned later on
1263 1263 em = matchmod.exact(match.files())
1264 1264 nm = matchmod.unionmatcher([self._narrowmatch, em])
1265 1265 return matchmod.intersectmatchers(match, nm)
1266 1266 return matchmod.intersectmatchers(match, self._narrowmatch)
1267 1267 return self._narrowmatch
1268 1268
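# Editorial illustration (not part of this changeset): intersecting a
# caller-supplied matcher with the narrowspec; the paths are hypothetical.
#
#   m = matchmod.match(repo.root, b'', [b'path:inside', b'path:outside/f'])
#   nm = repo.narrowmatch(m, includeexact=True)
#   # nm matches files under inside/ and also the exact path outside/f,
#   # even though the latter falls outside the narrowspec (so it can be
#   # warned about later rather than silently dropped).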
1269 1269 def setnarrowpats(self, newincludes, newexcludes):
1270 1270 narrowspec.save(self, newincludes, newexcludes)
1271 1271 self.invalidate(clearfilecache=True)
1272 1272
1273 1273 def __getitem__(self, changeid):
1274 1274 if changeid is None:
1275 1275 return context.workingctx(self)
1276 1276 if isinstance(changeid, context.basectx):
1277 1277 return changeid
1278 1278 if isinstance(changeid, slice):
1279 1279 # wdirrev isn't contiguous so the slice shouldn't include it
1280 1280 return [self[i]
1281 1281 for i in pycompat.xrange(*changeid.indices(len(self)))
1282 1282 if i not in self.changelog.filteredrevs]
1283 1283 try:
1284 1284 if isinstance(changeid, int):
1285 1285 node = self.changelog.node(changeid)
1286 1286 rev = changeid
1287 1287 elif changeid == 'null':
1288 1288 node = nullid
1289 1289 rev = nullrev
1290 1290 elif changeid == 'tip':
1291 1291 node = self.changelog.tip()
1292 1292 rev = self.changelog.rev(node)
1293 1293 elif changeid == '.':
1294 1294 # this is a hack to delay/avoid loading obsmarkers
1295 1295 # when we know that '.' won't be hidden
1296 1296 node = self.dirstate.p1()
1297 1297 rev = self.unfiltered().changelog.rev(node)
1298 1298 elif len(changeid) == 20:
1299 1299 try:
1300 1300 node = changeid
1301 1301 rev = self.changelog.rev(changeid)
1302 1302 except error.FilteredLookupError:
1303 1303 changeid = hex(changeid) # for the error message
1304 1304 raise
1305 1305 except LookupError:
1306 1306 # check if it might have come from damaged dirstate
1307 1307 #
1308 1308 # XXX we could avoid the unfiltered if we had a recognizable
1309 1309 # exception for filtered changeset access
1310 1310 if (self.local()
1311 1311 and changeid in self.unfiltered().dirstate.parents()):
1312 1312 msg = _("working directory has unknown parent '%s'!")
1313 1313 raise error.Abort(msg % short(changeid))
1314 1314 changeid = hex(changeid) # for the error message
1315 1315 raise
1316 1316
1317 1317 elif len(changeid) == 40:
1318 1318 node = bin(changeid)
1319 1319 rev = self.changelog.rev(node)
1320 1320 else:
1321 1321 raise error.ProgrammingError(
1322 1322 "unsupported changeid '%s' of type %s" %
1323 1323 (changeid, type(changeid)))
1324 1324
1325 1325 return context.changectx(self, rev, node)
1326 1326
1327 1327 except (error.FilteredIndexError, error.FilteredLookupError):
1328 1328 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1329 1329 % pycompat.bytestr(changeid))
1330 1330 except (IndexError, LookupError):
1331 1331 raise error.RepoLookupError(
1332 1332 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1333 1333 except error.WdirUnsupported:
1334 1334 return context.workingctx(self)
1335 1335
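# Editorial examples (not part of this changeset) of the changeid forms
# handled above; revision numbers and nodes are hypothetical.
#
#   repo[None]      # workingctx for the working directory
#   repo[0]         # changectx by integer revision
#   repo[b'tip']    # symbolic names 'tip', 'null', '.'
#   repo[node]      # 20-byte binary node or 40-char hex string
#   repo[0:5]       # slice -> list of changectx, filtered revs skipped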
1336 1336 def __contains__(self, changeid):
1337 1337 """True if the given changeid exists
1338 1338
1339 1339 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1340 1340 specified.
1341 1341 """
1342 1342 try:
1343 1343 self[changeid]
1344 1344 return True
1345 1345 except error.RepoLookupError:
1346 1346 return False
1347 1347
1348 1348 def __nonzero__(self):
1349 1349 return True
1350 1350
1351 1351 __bool__ = __nonzero__
1352 1352
1353 1353 def __len__(self):
1354 1354 # no need to pay the cost of repoview.changelog
1355 1355 unfi = self.unfiltered()
1356 1356 return len(unfi.changelog)
1357 1357
1358 1358 def __iter__(self):
1359 1359 return iter(self.changelog)
1360 1360
1361 1361 def revs(self, expr, *args):
1362 1362 '''Find revisions matching a revset.
1363 1363
1364 1364 The revset is specified as a string ``expr`` that may contain
1365 1365 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1366 1366
1367 1367 Revset aliases from the configuration are not expanded. To expand
1368 1368 user aliases, consider calling ``scmutil.revrange()`` or
1369 1369 ``repo.anyrevs([expr], user=True)``.
1370 1370
1371 1371 Returns a revset.abstractsmartset, which is a list-like interface
1372 1372 that contains integer revisions.
1373 1373 '''
1374 1374 tree = revsetlang.spectree(expr, *args)
1375 1375 return revset.makematcher(tree)(self)
1376 1376
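# Editorial usage sketch (not part of this changeset): %-formatting keeps
# caller-supplied values out of the revset grammar; see
# ``revsetlang.formatspec`` for the escape codes. The branch name is
# hypothetical.
#
#   for r in repo.revs(b'heads(branch(%s))', b'default'):
#       pass   # r is an integer revision
#   s = repo.revs(b'%ld and not public()', [0, 1, 2])  # %ld: list of ints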
1377 1377 def set(self, expr, *args):
1378 1378 '''Find revisions matching a revset and emit changectx instances.
1379 1379
1380 1380 This is a convenience wrapper around ``revs()`` that iterates the
1381 1381 result and is a generator of changectx instances.
1382 1382
1383 1383 Revset aliases from the configuration are not expanded. To expand
1384 1384 user aliases, consider calling ``scmutil.revrange()``.
1385 1385 '''
1386 1386 for r in self.revs(expr, *args):
1387 1387 yield self[r]
1388 1388
1389 1389 def anyrevs(self, specs, user=False, localalias=None):
1390 1390 '''Find revisions matching one of the given revsets.
1391 1391
1392 1392 Revset aliases from the configuration are not expanded by default. To
1393 1393 expand user aliases, specify ``user=True``. To provide some local
1394 1394 definitions overriding user aliases, set ``localalias`` to
1395 1395 ``{name: definitionstring}``.
1396 1396 '''
1397 1397 if user:
1398 1398 m = revset.matchany(self.ui, specs,
1399 1399 lookup=revset.lookupfn(self),
1400 1400 localalias=localalias)
1401 1401 else:
1402 1402 m = revset.matchany(None, specs, localalias=localalias)
1403 1403 return m(self)
1404 1404
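# Editorial usage sketch (not part of this changeset): resolving several
# specs at once, with a local alias shadowing any user alias of the same
# name; the alias and its definition are hypothetical.
#
#   m = repo.anyrevs([b'mine', b'tip'], user=True,
#                    localalias={b'mine': b'author(alice)'})
#   # m is a smartset of revisions matching either spec.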
1405 1405 def url(self):
1406 1406 return 'file:' + self.root
1407 1407
1408 1408 def hook(self, name, throw=False, **args):
1409 1409 """Call a hook, passing this repo instance.
1410 1410
1411 1411 This is a convenience method to aid invoking hooks. Extensions likely
1412 1412 won't call this unless they have registered a custom hook or are
1413 1413 replacing code that is expected to call a hook.
1414 1414 """
1415 1415 return hook.hook(self.ui, self, name, throw, **args)
1416 1416
1417 1417 @filteredpropertycache
1418 1418 def _tagscache(self):
1419 1419 '''Returns a tagscache object that contains various tags related
1420 1420 caches.'''
1421 1421
1422 1422 # This simplifies its cache management by having one decorated
1423 1423 # function (this one) and the rest simply fetch things from it.
1424 1424 class tagscache(object):
1425 1425 def __init__(self):
1426 1426 # These two define the set of tags for this repository. tags
1427 1427 # maps tag name to node; tagtypes maps tag name to 'global' or
1428 1428 # 'local'. (Global tags are defined by .hgtags across all
1429 1429 # heads, and local tags are defined in .hg/localtags.)
1430 1430 # They constitute the in-memory cache of tags.
1431 1431 self.tags = self.tagtypes = None
1432 1432
1433 1433 self.nodetagscache = self.tagslist = None
1434 1434
1435 1435 cache = tagscache()
1436 1436 cache.tags, cache.tagtypes = self._findtags()
1437 1437
1438 1438 return cache
1439 1439
1440 1440 def tags(self):
1441 1441 '''return a mapping of tag to node'''
1442 1442 t = {}
1443 1443 if self.changelog.filteredrevs:
1444 1444 tags, tt = self._findtags()
1445 1445 else:
1446 1446 tags = self._tagscache.tags
1447 1447 rev = self.changelog.rev
1448 1448 for k, v in tags.iteritems():
1449 1449 try:
1450 1450 # ignore tags to unknown nodes
1451 1451 rev(v)
1452 1452 t[k] = v
1453 1453 except (error.LookupError, ValueError):
1454 1454 pass
1455 1455 return t
1456 1456
1457 1457 def _findtags(self):
1458 1458 '''Do the hard work of finding tags. Return a pair of dicts
1459 1459 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1460 1460 maps tag name to a string like \'global\' or \'local\'.
1461 1461 Subclasses or extensions are free to add their own tags, but
1462 1462 should be aware that the returned dicts will be retained for the
1463 1463 duration of the localrepo object.'''
1464 1464
1465 1465 # XXX what tagtype should subclasses/extensions use? Currently
1466 1466 # mq and bookmarks add tags, but do not set the tagtype at all.
1467 1467 # Should each extension invent its own tag type? Should there
1468 1468 # be one tagtype for all such "virtual" tags? Or is the status
1469 1469 # quo fine?
1470 1470
1471 1471
1472 1472 # map tag name to (node, hist)
1473 1473 alltags = tagsmod.findglobaltags(self.ui, self)
1474 1474 # map tag name to tag type
1475 1475 tagtypes = dict((tag, 'global') for tag in alltags)
1476 1476
1477 1477 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1478 1478
1479 1479 # Build the return dicts. Have to re-encode tag names because
1480 1480 # the tags module always uses UTF-8 (in order not to lose info
1481 1481 # writing to the cache), but the rest of Mercurial wants them in
1482 1482 # local encoding.
1483 1483 tags = {}
1484 1484 for (name, (node, hist)) in alltags.iteritems():
1485 1485 if node != nullid:
1486 1486 tags[encoding.tolocal(name)] = node
1487 1487 tags['tip'] = self.changelog.tip()
1488 1488 tagtypes = dict([(encoding.tolocal(name), value)
1489 1489 for (name, value) in tagtypes.iteritems()])
1490 1490 return (tags, tagtypes)
1491 1491
1492 1492 def tagtype(self, tagname):
1493 1493 '''
1494 1494 return the type of the given tag. result can be:
1495 1495
1496 1496 'local' : a local tag
1497 1497 'global' : a global tag
1498 1498 None : tag does not exist
1499 1499 '''
1500 1500
1501 1501 return self._tagscache.tagtypes.get(tagname)
1502 1502
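# Editorial example (not part of this changeset); tag names hypothetical:
#
#   repo.tagtype(b'v1.0')     # -> 'global' (defined in .hgtags)
#   repo.tagtype(b'wip')      # -> 'local'  (defined in .hg/localtags)
#   repo.tagtype(b'nosuch')   # -> None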
1503 1503 def tagslist(self):
1504 1504 '''return a list of tags ordered by revision'''
1505 1505 if not self._tagscache.tagslist:
1506 1506 l = []
1507 1507 for t, n in self.tags().iteritems():
1508 1508 l.append((self.changelog.rev(n), t, n))
1509 1509 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1510 1510
1511 1511 return self._tagscache.tagslist
1512 1512
1513 1513 def nodetags(self, node):
1514 1514 '''return the tags associated with a node'''
1515 1515 if not self._tagscache.nodetagscache:
1516 1516 nodetagscache = {}
1517 1517 for t, n in self._tagscache.tags.iteritems():
1518 1518 nodetagscache.setdefault(n, []).append(t)
1519 1519 for tags in nodetagscache.itervalues():
1520 1520 tags.sort()
1521 1521 self._tagscache.nodetagscache = nodetagscache
1522 1522 return self._tagscache.nodetagscache.get(node, [])
1523 1523
1524 1524 def nodebookmarks(self, node):
1525 1525 """return the list of bookmarks pointing to the specified node"""
1526 1526 return self._bookmarks.names(node)
1527 1527
1528 1528 def branchmap(self):
1529 1529 '''returns a dictionary {branch: [branchheads]} with branchheads
1530 1530 ordered by increasing revision number'''
1531 1531 return self._branchcaches[self]
1532 1532
1533 1533 @unfilteredmethod
1534 1534 def revbranchcache(self):
1535 1535 if not self._revbranchcache:
1536 1536 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1537 1537 return self._revbranchcache
1538 1538
1539 1539 def branchtip(self, branch, ignoremissing=False):
1540 1540 '''return the tip node for a given branch
1541 1541
1542 1542 If ignoremissing is True, then this method will not raise an error.
1543 1543 This is helpful for callers that only expect None for a missing branch
1544 1544 (e.g. namespace).
1545 1545
1546 1546 '''
1547 1547 try:
1548 1548 return self.branchmap().branchtip(branch)
1549 1549 except KeyError:
1550 1550 if not ignoremissing:
1551 1551 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1552 1552 else:
1553 1553 pass
1554 1554
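# Editorial example (not part of this changeset): with ignoremissing, an
# unknown branch yields None instead of RepoLookupError; branch names are
# hypothetical.
#
#   repo.branchtip(b'default')                      # tip node of branch
#   repo.branchtip(b'nosuch', ignoremissing=True)   # None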
1555 1555 def lookup(self, key):
1556 1556 return scmutil.revsymbol(self, key).node()
1557 1557
1558 1558 def lookupbranch(self, key):
1559 if key in self.branchmap():
1559 if key in self.branchmap().entries:
1560 1560 return key
1561 1561
1562 1562 return scmutil.revsymbol(self, key).branch()
1563 1563
1564 1564 def known(self, nodes):
1565 1565 cl = self.changelog
1566 1566 nm = cl.nodemap
1567 1567 filtered = cl.filteredrevs
1568 1568 result = []
1569 1569 for n in nodes:
1570 1570 r = nm.get(n)
1571 1571 resp = not (r is None or r in filtered)
1572 1572 result.append(resp)
1573 1573 return result
1574 1574
1575 1575 def local(self):
1576 1576 return self
1577 1577
1578 1578 def publishing(self):
1579 1579 # it's safe (and desirable) to trust the publish flag unconditionally
1580 1580 # so that we don't finalize changes shared between users via ssh or nfs
1581 1581 return self.ui.configbool('phases', 'publish', untrusted=True)
1582 1582
1583 1583 def cancopy(self):
1584 1584 # so statichttprepo's override of local() works
1585 1585 if not self.local():
1586 1586 return False
1587 1587 if not self.publishing():
1588 1588 return True
1589 1589 # if publishing we can't copy if there is filtered content
1590 1590 return not self.filtered('visible').changelog.filteredrevs
1591 1591
1592 1592 def shared(self):
1593 1593 '''the type of shared repository (None if not shared)'''
1594 1594 if self.sharedpath != self.path:
1595 1595 return 'store'
1596 1596 return None
1597 1597
1598 1598 def wjoin(self, f, *insidef):
1599 1599 return self.vfs.reljoin(self.root, f, *insidef)
1600 1600
1601 1601 def setparents(self, p1, p2=nullid):
1602 1602 with self.dirstate.parentchange():
1603 1603 copies = self.dirstate.setparents(p1, p2)
1604 1604 pctx = self[p1]
1605 1605 if copies:
1606 1606 # Adjust copy records, the dirstate cannot do it, it
1607 1607 # requires access to parents manifests. Preserve them
1608 1608 # only for entries added to first parent.
1609 1609 for f in copies:
1610 1610 if f not in pctx and copies[f] in pctx:
1611 1611 self.dirstate.copy(copies[f], f)
1612 1612 if p2 == nullid:
1613 1613 for f, s in sorted(self.dirstate.copies().items()):
1614 1614 if f not in pctx and s not in pctx:
1615 1615 self.dirstate.copy(None, f)
1616 1616
1617 1617 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1618 1618 """changeid must be a changeset revision, if specified.
1619 1619 fileid can be a file revision or node."""
1620 1620 return context.filectx(self, path, changeid, fileid,
1621 1621 changectx=changectx)
1622 1622
1623 1623 def getcwd(self):
1624 1624 return self.dirstate.getcwd()
1625 1625
1626 1626 def pathto(self, f, cwd=None):
1627 1627 return self.dirstate.pathto(f, cwd)
1628 1628
1629 1629 def _loadfilter(self, filter):
1630 1630 if filter not in self._filterpats:
1631 1631 l = []
1632 1632 for pat, cmd in self.ui.configitems(filter):
1633 1633 if cmd == '!':
1634 1634 continue
1635 1635 mf = matchmod.match(self.root, '', [pat])
1636 1636 fn = None
1637 1637 params = cmd
1638 1638 for name, filterfn in self._datafilters.iteritems():
1639 1639 if cmd.startswith(name):
1640 1640 fn = filterfn
1641 1641 params = cmd[len(name):].lstrip()
1642 1642 break
1643 1643 if not fn:
1644 1644 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1645 1645 # Wrap old filters not supporting keyword arguments
1646 1646 if not pycompat.getargspec(fn)[2]:
1647 1647 oldfn = fn
1648 1648 fn = lambda s, c, **kwargs: oldfn(s, c)
1649 1649 l.append((mf, fn, params))
1650 1650 self._filterpats[filter] = l
1651 1651 return self._filterpats[filter]
1652 1652
1653 1653 def _filter(self, filterpats, filename, data):
1654 1654 for mf, fn, cmd in filterpats:
1655 1655 if mf(filename):
1656 1656 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1657 1657 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1658 1658 break
1659 1659
1660 1660 return data
1661 1661
1662 1662 @unfilteredpropertycache
1663 1663 def _encodefilterpats(self):
1664 1664 return self._loadfilter('encode')
1665 1665
1666 1666 @unfilteredpropertycache
1667 1667 def _decodefilterpats(self):
1668 1668 return self._loadfilter('decode')
1669 1669
1670 1670 def adddatafilter(self, name, filter):
1671 1671 self._datafilters[name] = filter
1672 1672
1673 1673 def wread(self, filename):
1674 1674 if self.wvfs.islink(filename):
1675 1675 data = self.wvfs.readlink(filename)
1676 1676 else:
1677 1677 data = self.wvfs.read(filename)
1678 1678 return self._filter(self._encodefilterpats, filename, data)
1679 1679
1680 1680 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1681 1681 """write ``data`` into ``filename`` in the working directory
1682 1682
1683 1683 This returns the length of the written (maybe decoded) data.
1684 1684 """
1685 1685 data = self._filter(self._decodefilterpats, filename, data)
1686 1686 if 'l' in flags:
1687 1687 self.wvfs.symlink(data, filename)
1688 1688 else:
1689 1689 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1690 1690 **kwargs)
1691 1691 if 'x' in flags:
1692 1692 self.wvfs.setflags(filename, False, True)
1693 1693 else:
1694 1694 self.wvfs.setflags(filename, False, False)
1695 1695 return len(data)
1696 1696
1697 1697 def wwritedata(self, filename, data):
1698 1698 return self._filter(self._decodefilterpats, filename, data)
1699 1699
1700 1700 def currenttransaction(self):
1701 1701 """return the current transaction or None if non exists"""
1702 1702 if self._transref:
1703 1703 tr = self._transref()
1704 1704 else:
1705 1705 tr = None
1706 1706
1707 1707 if tr and tr.running():
1708 1708 return tr
1709 1709 return None
1710 1710
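# Editorial usage sketch (not part of this changeset): code that may or
# may not already run inside a transaction; the description is
# hypothetical. transaction() itself nests via tr.nest() when one is
# already running.
#
#   tr = repo.currenttransaction()
#   if tr is None:
#       tr = repo.transaction(b'my-operation')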
1711 1711 def transaction(self, desc, report=None):
1712 1712 if (self.ui.configbool('devel', 'all-warnings')
1713 1713 or self.ui.configbool('devel', 'check-locks')):
1714 1714 if self._currentlock(self._lockref) is None:
1715 1715 raise error.ProgrammingError('transaction requires locking')
1716 1716 tr = self.currenttransaction()
1717 1717 if tr is not None:
1718 1718 return tr.nest(name=desc)
1719 1719
1720 1720 # abort here if the journal already exists
1721 1721 if self.svfs.exists("journal"):
1722 1722 raise error.RepoError(
1723 1723 _("abandoned transaction found"),
1724 1724 hint=_("run 'hg recover' to clean up transaction"))
1725 1725
1726 1726 idbase = "%.40f#%f" % (random.random(), time.time())
1727 1727 ha = hex(hashlib.sha1(idbase).digest())
1728 1728 txnid = 'TXN:' + ha
1729 1729 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1730 1730
1731 1731 self._writejournal(desc)
1732 1732 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1733 1733 if report:
1734 1734 rp = report
1735 1735 else:
1736 1736 rp = self.ui.warn
1737 1737 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1738 1738 # we must avoid cyclic reference between repo and transaction.
1739 1739 reporef = weakref.ref(self)
1740 1740 # Code to track tag movement
1741 1741 #
1742 1742 # Since tags are all handled as file content, it is actually quite hard
1743 1743 # to track these movements from a code perspective. So we fall back to
1744 1744 # tracking at the repository level. One could envision tracking changes
1745 1745 # to the '.hgtags' file through changegroup application, but that fails
1746 1746 # to cope with cases where a transaction exposes new heads without a
1747 1747 # changegroup being involved (eg: phase movement).
1748 1748 #
1749 1749 # For now, we gate the feature behind a flag since this likely comes
1750 1750 # with performance impacts. The current code runs more often than needed
1751 1751 # and does not use caches as much as it could. The current focus is on
1752 1752 # the behavior of the feature, so we disable it by default. The flag
1753 1753 # will be removed when we are happy with the performance impact.
1754 1754 #
1755 1755 # Once this feature is no longer experimental move the following
1756 1756 # documentation to the appropriate help section:
1757 1757 #
1758 1758 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1759 1759 # tags (new or changed or deleted tags). In addition the details of
1760 1760 # these changes are made available in a file at:
1761 1761 # ``REPOROOT/.hg/changes/tags.changes``.
1762 1762 # Make sure you check for HG_TAG_MOVED before reading that file as it
1763 1763 # might exist from a previous transaction even if no tags were touched
1764 1764 # in this one. Changes are recorded in a line-based format::
1765 1765 #
1766 1766 # <action> <hex-node> <tag-name>\n
1767 1767 #
1768 1768 # Actions are defined as follows:
1769 1769 # "-R": tag is removed,
1770 1770 # "+A": tag is added,
1771 1771 # "-M": tag is moved (old value),
1772 1772 # "+M": tag is moved (new value),
1773 1773 tracktags = lambda x: None
1774 1774 # experimental config: experimental.hook-track-tags
1775 1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1776 1776 if desc != 'strip' and shouldtracktags:
1777 1777 oldheads = self.changelog.headrevs()
1778 1778 def tracktags(tr2):
1779 1779 repo = reporef()
1780 1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1781 1781 newheads = repo.changelog.headrevs()
1782 1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1783 1783 # notes: we compare lists here.
1784 1784 # As we do it only once, building a set would not be cheaper
1785 1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1786 1786 if changes:
1787 1787 tr2.hookargs['tag_moved'] = '1'
1788 1788 with repo.vfs('changes/tags.changes', 'w',
1789 1789 atomictemp=True) as changesfile:
1790 1790 # note: we do not register the file to the transaction
1791 1791 # because we need it to still exist when the transaction
1792 1792 # is closed (for txnclose hooks)
1793 1793 tagsmod.writediff(changesfile, changes)
1794 1794 def validate(tr2):
1795 1795 """will run pre-closing hooks"""
1796 1796 # XXX the transaction API is a bit lacking here so we take a hacky
1797 1797 # path for now
1798 1798 #
1799 1799 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1800 1800 # dict is copied before these run. In addition we need the data
1801 1801 # available to in-memory hooks too.
1802 1802 #
1803 1803 # Moreover, we also need to make sure this runs before txnclose
1804 1804 # hooks and there is no "pending" mechanism that would execute
1805 1805 # logic only if hooks are about to run.
1806 1806 #
1807 1807 # Fixing this limitation of the transaction is also needed to track
1808 1808 # other families of changes (bookmarks, phases, obsolescence).
1809 1809 #
1810 1810 # This will have to be fixed before we remove the experimental
1811 1811 # gating.
1812 1812 tracktags(tr2)
1813 1813 repo = reporef()
1814 1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1815 1815 scmutil.enforcesinglehead(repo, tr2, desc)
1816 1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1817 1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1818 1818 args = tr.hookargs.copy()
1819 1819 args.update(bookmarks.preparehookargs(name, old, new))
1820 1820 repo.hook('pretxnclose-bookmark', throw=True,
1821 1821 **pycompat.strkwargs(args))
1822 1822 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1823 1823 cl = repo.unfiltered().changelog
1824 1824 for rev, (old, new) in tr.changes['phases'].items():
1825 1825 args = tr.hookargs.copy()
1826 1826 node = hex(cl.node(rev))
1827 1827 args.update(phases.preparehookargs(node, old, new))
1828 1828 repo.hook('pretxnclose-phase', throw=True,
1829 1829 **pycompat.strkwargs(args))
1830 1830
1831 1831 repo.hook('pretxnclose', throw=True,
1832 1832 **pycompat.strkwargs(tr.hookargs))
1833 1833 def releasefn(tr, success):
1834 1834 repo = reporef()
1835 1835 if success:
1836 1836 # this should be explicitly invoked here, because
1837 1837 # in-memory changes aren't written out at closing
1838 1838 # transaction, if tr.addfilegenerator (via
1839 1839 # dirstate.write or so) isn't invoked while
1840 1840 # transaction running
1841 1841 repo.dirstate.write(None)
1842 1842 else:
1843 1843 # discard all changes (including ones already written
1844 1844 # out) in this transaction
1845 1845 narrowspec.restorebackup(self, 'journal.narrowspec')
1846 1846 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1847 1847 repo.dirstate.restorebackup(None, 'journal.dirstate')
1848 1848
1849 1849 repo.invalidate(clearfilecache=True)
1850 1850
1851 1851 tr = transaction.transaction(rp, self.svfs, vfsmap,
1852 1852 "journal",
1853 1853 "undo",
1854 1854 aftertrans(renames),
1855 1855 self.store.createmode,
1856 1856 validator=validate,
1857 1857 releasefn=releasefn,
1858 1858 checkambigfiles=_cachedfiles,
1859 1859 name=desc)
1860 1860 tr.changes['origrepolen'] = len(self)
1861 1861 tr.changes['obsmarkers'] = set()
1862 1862 tr.changes['phases'] = {}
1863 1863 tr.changes['bookmarks'] = {}
1864 1864
1865 1865 tr.hookargs['txnid'] = txnid
1866 1866 tr.hookargs['txnname'] = desc
1867 1867 # note: writing the fncache only during finalize means that the file is
1868 1868 # outdated when running hooks. As fncache is used for streaming clone,
1869 1869 # this is not expected to break anything that happens during the hooks.
1870 1870 tr.addfinalize('flush-fncache', self.store.write)
1871 1871 def txnclosehook(tr2):
1872 1872 """To be run if transaction is successful, will schedule a hook run
1873 1873 """
1874 1874 # Don't reference tr2 in hook() so we don't hold a reference.
1875 1875 # This reduces memory consumption when there are multiple
1876 1876 # transactions per lock. This can likely go away if issue5045
1877 1877 # fixes the function accumulation.
1878 1878 hookargs = tr2.hookargs
1879 1879
1880 1880 def hookfunc():
1881 1881 repo = reporef()
1882 1882 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1883 1883 bmchanges = sorted(tr.changes['bookmarks'].items())
1884 1884 for name, (old, new) in bmchanges:
1885 1885 args = tr.hookargs.copy()
1886 1886 args.update(bookmarks.preparehookargs(name, old, new))
1887 1887 repo.hook('txnclose-bookmark', throw=False,
1888 1888 **pycompat.strkwargs(args))
1889 1889
1890 1890 if hook.hashook(repo.ui, 'txnclose-phase'):
1891 1891 cl = repo.unfiltered().changelog
1892 1892 phasemv = sorted(tr.changes['phases'].items())
1893 1893 for rev, (old, new) in phasemv:
1894 1894 args = tr.hookargs.copy()
1895 1895 node = hex(cl.node(rev))
1896 1896 args.update(phases.preparehookargs(node, old, new))
1897 1897 repo.hook('txnclose-phase', throw=False,
1898 1898 **pycompat.strkwargs(args))
1899 1899
1900 1900 repo.hook('txnclose', throw=False,
1901 1901 **pycompat.strkwargs(hookargs))
1902 1902 reporef()._afterlock(hookfunc)
1903 1903 tr.addfinalize('txnclose-hook', txnclosehook)
1904 1904 # Include a leading "-" to make it happen before the transaction summary
1905 1905 # reports registered via scmutil.registersummarycallback() whose names
1906 1906 # are 00-txnreport etc. That way, the caches will be warm when the
1907 1907 # callbacks run.
1908 1908 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1909 1909 def txnaborthook(tr2):
1910 1910 """To be run if transaction is aborted
1911 1911 """
1912 1912 reporef().hook('txnabort', throw=False,
1913 1913 **pycompat.strkwargs(tr2.hookargs))
1914 1914 tr.addabort('txnabort-hook', txnaborthook)
1915 1915 # avoid eager cache invalidation. in-memory data should be identical
1916 1916 # to stored data if transaction has no error.
1917 1917 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1918 1918 self._transref = weakref.ref(tr)
1919 1919 scmutil.registersummarycallback(self, tr, desc)
1920 1920 return tr
1921 1921
1922 1922 def _journalfiles(self):
1923 1923 return ((self.svfs, 'journal'),
1924 1924 (self.svfs, 'journal.narrowspec'),
1925 1925 (self.vfs, 'journal.narrowspec.dirstate'),
1926 1926 (self.vfs, 'journal.dirstate'),
1927 1927 (self.vfs, 'journal.branch'),
1928 1928 (self.vfs, 'journal.desc'),
1929 1929 (self.vfs, 'journal.bookmarks'),
1930 1930 (self.svfs, 'journal.phaseroots'))
1931 1931
1932 1932 def undofiles(self):
1933 1933 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1934 1934
1935 1935 @unfilteredmethod
1936 1936 def _writejournal(self, desc):
1937 1937 self.dirstate.savebackup(None, 'journal.dirstate')
1938 1938 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1939 1939 narrowspec.savebackup(self, 'journal.narrowspec')
1940 1940 self.vfs.write("journal.branch",
1941 1941 encoding.fromlocal(self.dirstate.branch()))
1942 1942 self.vfs.write("journal.desc",
1943 1943 "%d\n%s\n" % (len(self), desc))
1944 1944 self.vfs.write("journal.bookmarks",
1945 1945 self.vfs.tryread("bookmarks"))
1946 1946 self.svfs.write("journal.phaseroots",
1947 1947 self.svfs.tryread("phaseroots"))
1948 1948
1949 1949 def recover(self):
1950 1950 with self.lock():
1951 1951 if self.svfs.exists("journal"):
1952 1952 self.ui.status(_("rolling back interrupted transaction\n"))
1953 1953 vfsmap = {'': self.svfs,
1954 1954 'plain': self.vfs,}
1955 1955 transaction.rollback(self.svfs, vfsmap, "journal",
1956 1956 self.ui.warn,
1957 1957 checkambigfiles=_cachedfiles)
1958 1958 self.invalidate()
1959 1959 return True
1960 1960 else:
1961 1961 self.ui.warn(_("no interrupted transaction available\n"))
1962 1962 return False
1963 1963
1964 1964 def rollback(self, dryrun=False, force=False):
1965 1965 wlock = lock = dsguard = None
1966 1966 try:
1967 1967 wlock = self.wlock()
1968 1968 lock = self.lock()
1969 1969 if self.svfs.exists("undo"):
1970 1970 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1971 1971
1972 1972 return self._rollback(dryrun, force, dsguard)
1973 1973 else:
1974 1974 self.ui.warn(_("no rollback information available\n"))
1975 1975 return 1
1976 1976 finally:
1977 1977 release(dsguard, lock, wlock)
1978 1978
1979 1979 @unfilteredmethod # Until we get smarter cache management
1980 1980 def _rollback(self, dryrun, force, dsguard):
1981 1981 ui = self.ui
1982 1982 try:
1983 1983 args = self.vfs.read('undo.desc').splitlines()
1984 1984 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1985 1985 if len(args) >= 3:
1986 1986 detail = args[2]
1987 1987 oldtip = oldlen - 1
1988 1988
1989 1989 if detail and ui.verbose:
1990 1990 msg = (_('repository tip rolled back to revision %d'
1991 1991 ' (undo %s: %s)\n')
1992 1992 % (oldtip, desc, detail))
1993 1993 else:
1994 1994 msg = (_('repository tip rolled back to revision %d'
1995 1995 ' (undo %s)\n')
1996 1996 % (oldtip, desc))
1997 1997 except IOError:
1998 1998 msg = _('rolling back unknown transaction\n')
1999 1999 desc = None
2000 2000
2001 2001 if not force and self['.'] != self['tip'] and desc == 'commit':
2002 2002 raise error.Abort(
2003 2003 _('rollback of last commit while not checked out '
2004 2004 'may lose data'), hint=_('use -f to force'))
2005 2005
2006 2006 ui.status(msg)
2007 2007 if dryrun:
2008 2008 return 0
2009 2009
2010 2010 parents = self.dirstate.parents()
2011 2011 self.destroying()
2012 2012 vfsmap = {'plain': self.vfs, '': self.svfs}
2013 2013 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2014 2014 checkambigfiles=_cachedfiles)
2015 2015 if self.vfs.exists('undo.bookmarks'):
2016 2016 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2017 2017 if self.svfs.exists('undo.phaseroots'):
2018 2018 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2019 2019 self.invalidate()
2020 2020
2021 2021 parentgone = any(p not in self.changelog.nodemap for p in parents)
2022 2022 if parentgone:
2023 2023 # prevent dirstateguard from overwriting already restored one
2024 2024 dsguard.close()
2025 2025
2026 2026 narrowspec.restorebackup(self, 'undo.narrowspec')
2027 2027 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2028 2028 self.dirstate.restorebackup(None, 'undo.dirstate')
2029 2029 try:
2030 2030 branch = self.vfs.read('undo.branch')
2031 2031 self.dirstate.setbranch(encoding.tolocal(branch))
2032 2032 except IOError:
2033 2033 ui.warn(_('named branch could not be reset: '
2034 2034 'current branch is still \'%s\'\n')
2035 2035 % self.dirstate.branch())
2036 2036
2037 2037 parents = tuple([p.rev() for p in self[None].parents()])
2038 2038 if len(parents) > 1:
2039 2039 ui.status(_('working directory now based on '
2040 2040 'revisions %d and %d\n') % parents)
2041 2041 else:
2042 2042 ui.status(_('working directory now based on '
2043 2043 'revision %d\n') % parents)
2044 2044 mergemod.mergestate.clean(self, self['.'].node())
2045 2045
2046 2046 # TODO: if we know which new heads may result from this rollback, pass
2047 2047 # them to destroy(), which will prevent the branchhead cache from being
2048 2048 # invalidated.
2049 2049 self.destroyed()
2050 2050 return 0
2051 2051
2052 2052 def _buildcacheupdater(self, newtransaction):
2053 2053 """called during transaction to build the callback updating cache
2054 2054
2055 2055 Lives on the repository to help extensions that might want to augment
2056 2056 this logic. For this purpose, the created transaction is passed to the
2057 2057 method.
2058 2058 """
2059 2059 # we must avoid cyclic reference between repo and transaction.
2060 2060 reporef = weakref.ref(self)
2061 2061 def updater(tr):
2062 2062 repo = reporef()
2063 2063 repo.updatecaches(tr)
2064 2064 return updater
2065 2065
2066 2066 @unfilteredmethod
2067 2067 def updatecaches(self, tr=None, full=False):
2068 2068 """warm appropriate caches
2069 2069
2070 2070 If this function is called after a transaction closed, the transaction
2071 2071 will be available in the 'tr' argument. This can be used to selectively
2072 2072 update caches relevant to the changes in that transaction.
2073 2073
2074 2074 If 'full' is set, make sure all caches the function knows about have
2075 2075 up-to-date data, even the ones usually loaded more lazily.
2076 2076 """
2077 2077 if tr is not None and tr.hookargs.get('source') == 'strip':
2078 2078 # During strip, many caches are invalid but
2079 2079 # a later call to `destroyed` will refresh them.
2080 2080 return
2081 2081
2082 2082 if tr is None or tr.changes['origrepolen'] < len(self):
2083 2083 # accessing the 'served' branchmap should refresh all the others,
2084 2084 self.ui.debug('updating the branch cache\n')
2085 2085 self.filtered('served').branchmap()
2086 2086
2087 2087 if full:
2088 2088 unfi = self.unfiltered()
2089 2089 rbc = unfi.revbranchcache()
2090 2090 for r in unfi.changelog:
2091 2091 rbc.branchinfo(r)
2092 2092 rbc.write()
2093 2093
2094 2094 # ensure the working copy parents are in the manifestfulltextcache
2095 2095 for ctx in self['.'].parents():
2096 2096 ctx.manifest() # accessing the manifest is enough
2097 2097
2098 2098 # accessing tags warms the cache
2099 2099 self.tags()
2100 2100 self.filtered('served').tags()
2101 2101
2102 2102 def invalidatecaches(self):
2103 2103
2104 2104 if r'_tagscache' in vars(self):
2105 2105 # can't use delattr on proxy
2106 2106 del self.__dict__[r'_tagscache']
2107 2107
2108 2108 self._branchcaches.clear()
2109 2109 self.invalidatevolatilesets()
2110 2110 self._sparsesignaturecache.clear()
2111 2111
2112 2112 def invalidatevolatilesets(self):
2113 2113 self.filteredrevcache.clear()
2114 2114 obsolete.clearobscaches(self)
2115 2115
2116 2116 def invalidatedirstate(self):
2117 2117 '''Invalidates the dirstate, causing the next call to dirstate
2118 2118 to check if it was modified since the last time it was read,
2119 2119 rereading it if it has.
2120 2120
2121 2121 This is different from dirstate.invalidate() in that it doesn't
2122 2122 always reread the dirstate. Use dirstate.invalidate() if you want to
2123 2123 explicitly read the dirstate again (i.e. restoring it to a previous
2124 2124 known good state).'''
2125 2125 if hasunfilteredcache(self, r'dirstate'):
2126 2126 for k in self.dirstate._filecache:
2127 2127 try:
2128 2128 delattr(self.dirstate, k)
2129 2129 except AttributeError:
2130 2130 pass
2131 2131 delattr(self.unfiltered(), r'dirstate')
2132 2132
2133 2133 def invalidate(self, clearfilecache=False):
2134 2134 '''Invalidates both store and non-store parts other than dirstate
2135 2135
2136 2136 If a transaction is running, invalidation of store is omitted,
2137 2137 because discarding in-memory changes might cause inconsistency
2138 2138 (e.g. incomplete fncache causes unintentional failure, but
2139 2139 redundant one doesn't).
2140 2140 '''
2141 2141 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2142 2142 for k in list(self._filecache.keys()):
2143 2143 # dirstate is invalidated separately in invalidatedirstate()
2144 2144 if k == 'dirstate':
2145 2145 continue
2146 2146 if (k == 'changelog' and
2147 2147 self.currenttransaction() and
2148 2148 self.changelog._delayed):
2149 2149 # The changelog object may store unwritten revisions. We don't
2150 2150 # want to lose them.
2151 2151 # TODO: Solve the problem instead of working around it.
2152 2152 continue
2153 2153
2154 2154 if clearfilecache:
2155 2155 del self._filecache[k]
2156 2156 try:
2157 2157 delattr(unfiltered, k)
2158 2158 except AttributeError:
2159 2159 pass
2160 2160 self.invalidatecaches()
2161 2161 if not self.currenttransaction():
2162 2162 # TODO: Changing contents of store outside transaction
2163 2163 # causes inconsistency. We should make in-memory store
2164 2164 # changes detectable, and abort if changed.
2165 2165 self.store.invalidatecaches()
2166 2166
2167 2167 def invalidateall(self):
2168 2168 '''Fully invalidates both store and non-store parts, causing the
2169 2169 subsequent operation to reread any outside changes.'''
2170 2170 # extensions should hook this to invalidate their caches
2171 2171 self.invalidate()
2172 2172 self.invalidatedirstate()
2173 2173
2174 2174 @unfilteredmethod
2175 2175 def _refreshfilecachestats(self, tr):
2176 2176 """Reload stats of cached files so that they are flagged as valid"""
2177 2177 for k, ce in self._filecache.items():
2178 2178 k = pycompat.sysstr(k)
2179 2179 if k == r'dirstate' or k not in self.__dict__:
2180 2180 continue
2181 2181 ce.refresh()
2182 2182
2183 2183 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2184 2184 inheritchecker=None, parentenvvar=None):
2185 2185 parentlock = None
2186 2186 # the contents of parentenvvar are used by the underlying lock to
2187 2187 # determine whether it can be inherited
2188 2188 if parentenvvar is not None:
2189 2189 parentlock = encoding.environ.get(parentenvvar)
2190 2190
2191 2191 timeout = 0
2192 2192 warntimeout = 0
2193 2193 if wait:
2194 2194 timeout = self.ui.configint("ui", "timeout")
2195 2195 warntimeout = self.ui.configint("ui", "timeout.warn")
2196 2196 # internal config: ui.signal-safe-lock
2197 2197 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2198 2198
2199 2199 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2200 2200 releasefn=releasefn,
2201 2201 acquirefn=acquirefn, desc=desc,
2202 2202 inheritchecker=inheritchecker,
2203 2203 parentlock=parentlock,
2204 2204 signalsafe=signalsafe)
2205 2205 return l
2206 2206
2207 2207 def _afterlock(self, callback):
2208 2208 """add a callback to be run when the repository is fully unlocked
2209 2209
2210 2210 The callback will be executed when the outermost lock is released
2211 2211 (with wlock being higher level than 'lock')."""
2212 2212 for ref in (self._wlockref, self._lockref):
2213 2213 l = ref and ref()
2214 2214 if l and l.held:
2215 2215 l.postrelease.append(callback)
2216 2216 break
2217 2217 else: # no lock has been found.
2218 2218 callback()
2219 2219
2220 2220 def lock(self, wait=True):
2221 2221 '''Lock the repository store (.hg/store) and return a weak reference
2222 2222 to the lock. Use this before modifying the store (e.g. committing or
2223 2223 stripping). If you are opening a transaction, get a lock as well.
2224 2224
2225 2225 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2226 2226 'wlock' first to avoid a dead-lock hazard.'''
2227 2227 l = self._currentlock(self._lockref)
2228 2228 if l is not None:
2229 2229 l.lock()
2230 2230 return l
2231 2231
2232 2232 l = self._lock(vfs=self.svfs,
2233 2233 lockname="lock",
2234 2234 wait=wait,
2235 2235 releasefn=None,
2236 2236 acquirefn=self.invalidate,
2237 2237 desc=_('repository %s') % self.origroot)
2238 2238 self._lockref = weakref.ref(l)
2239 2239 return l
2240 2240
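# Editorial usage sketch (not part of this changeset): acquiring both
# locks in the documented order before opening a transaction; the
# transaction description is hypothetical.
#
#   with repo.wlock():        # always take 'wlock' before 'lock'
#       with repo.lock():
#           with repo.transaction(b'my-change'):
#               pass          # modify the store here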
2241 2241 def _wlockchecktransaction(self):
2242 2242 if self.currenttransaction() is not None:
2243 2243 raise error.LockInheritanceContractViolation(
2244 2244 'wlock cannot be inherited in the middle of a transaction')
2245 2245
2246 2246 def wlock(self, wait=True):
2247 2247 '''Lock the non-store parts of the repository (everything under
2248 2248 .hg except .hg/store) and return a weak reference to the lock.
2249 2249
2250 2250 Use this before modifying files in .hg.
2251 2251
2252 2252 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2253 2253 'wlock' first to avoid a dead-lock hazard.'''
2254 2254 l = self._wlockref and self._wlockref()
2255 2255 if l is not None and l.held:
2256 2256 l.lock()
2257 2257 return l
2258 2258
2259 2259 # We do not need to check for non-waiting lock acquisition. Such
2260 2260 # acquisition would not cause a dead-lock, as it would just fail.
2261 2261 if wait and (self.ui.configbool('devel', 'all-warnings')
2262 2262 or self.ui.configbool('devel', 'check-locks')):
2263 2263 if self._currentlock(self._lockref) is not None:
2264 2264 self.ui.develwarn('"wlock" acquired after "lock"')
2265 2265
2266 2266 def unlock():
2267 2267 if self.dirstate.pendingparentchange():
2268 2268 self.dirstate.invalidate()
2269 2269 else:
2270 2270 self.dirstate.write(None)
2271 2271
2272 2272 self._filecache['dirstate'].refresh()
2273 2273
2274 2274 l = self._lock(self.vfs, "wlock", wait, unlock,
2275 2275 self.invalidatedirstate, _('working directory of %s') %
2276 2276 self.origroot,
2277 2277 inheritchecker=self._wlockchecktransaction,
2278 2278 parentenvvar='HG_WLOCK_LOCKER')
2279 2279 self._wlockref = weakref.ref(l)
2280 2280 return l
2281 2281
2282 2282 def _currentlock(self, lockref):
2283 2283 """Returns the lock if it's held, or None if it's not."""
2284 2284 if lockref is None:
2285 2285 return None
2286 2286 l = lockref()
2287 2287 if l is None or not l.held:
2288 2288 return None
2289 2289 return l
2290 2290
2291 2291 def currentwlock(self):
2292 2292 """Returns the wlock if it's held, or None if it's not."""
2293 2293 return self._currentlock(self._wlockref)
2294 2294
2295 2295 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2296 2296 """
2297 2297 commit an individual file as part of a larger transaction
2298 2298 """
2299 2299
2300 2300 fname = fctx.path()
2301 2301 fparent1 = manifest1.get(fname, nullid)
2302 2302 fparent2 = manifest2.get(fname, nullid)
2303 2303 if isinstance(fctx, context.filectx):
2304 2304 node = fctx.filenode()
2305 2305 if node in [fparent1, fparent2]:
2306 2306 self.ui.debug('reusing %s filelog entry\n' % fname)
2307 2307 if manifest1.flags(fname) != fctx.flags():
2308 2308 changelist.append(fname)
2309 2309 return node
2310 2310
2311 2311 flog = self.file(fname)
2312 2312 meta = {}
2313 2313 cfname = fctx.copysource()
2314 2314 if cfname and cfname != fname:
2315 2315 # Mark the new revision of this file as a copy of another
2316 2316 # file. This copy data will effectively act as a parent
2317 2317 # of this new revision. If this is a merge, the first
2318 2318 # parent will be the nullid (meaning "look up the copy data")
2319 2319 # and the second one will be the other parent. For example:
2320 2320 #
2321 2321 # 0 --- 1 --- 3 rev1 changes file foo
2322 2322 # \ / rev2 renames foo to bar and changes it
2323 2323 # \- 2 -/ rev3 should have bar with all changes and
2324 2324 # should record that bar descends from
2325 2325 # bar in rev2 and foo in rev1
2326 2326 #
2327 2327 # this allows this merge to succeed:
2328 2328 #
2329 2329 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2330 2330 # \ / merging rev3 and rev4 should use bar@rev2
2331 2331 # \- 2 --- 4 as the merge base
2332 2332 #
2333 2333
2334 2334 crev = manifest1.get(cfname)
2335 2335 newfparent = fparent2
2336 2336
2337 2337 if manifest2: # branch merge
2338 2338 if fparent2 == nullid or crev is None: # copied on remote side
2339 2339 if cfname in manifest2:
2340 2340 crev = manifest2[cfname]
2341 2341 newfparent = fparent1
2342 2342
2343 2343 # Here, we used to search backwards through history to try to find
2344 2344 # where the file copy came from if the source of a copy was not in
2345 2345 # the parent directory. However, this doesn't actually make sense to
2346 2346 # do (what does a copy from something not in your working copy even
2347 2347 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
2348 2348 # the user that copy information was dropped, so if they didn't
2349 2349 # expect this outcome it can be fixed, but this is the correct
2350 2350 # behavior in this circumstance.
2351 2351
2352 2352 if crev:
2353 2353 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2354 2354 meta["copy"] = cfname
2355 2355 meta["copyrev"] = hex(crev)
2356 2356 fparent1, fparent2 = nullid, newfparent
2357 2357 else:
2358 2358 self.ui.warn(_("warning: can't find ancestor for '%s' "
2359 2359 "copied from '%s'!\n") % (fname, cfname))
2360 2360
2361 2361 elif fparent1 == nullid:
2362 2362 fparent1, fparent2 = fparent2, nullid
2363 2363 elif fparent2 != nullid:
2364 2364 # is one parent an ancestor of the other?
2365 2365 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2366 2366 if fparent1 in fparentancestors:
2367 2367 fparent1, fparent2 = fparent2, nullid
2368 2368 elif fparent2 in fparentancestors:
2369 2369 fparent2 = nullid
2370 2370
2371 2371 # is the file changed?
2372 2372 text = fctx.data()
2373 2373 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2374 2374 changelist.append(fname)
2375 2375 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2376 2376 # are just the flags changed during merge?
2377 2377 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2378 2378 changelist.append(fname)
2379 2379
2380 2380 return fparent1
2381 2381
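# Editor's note: a hedged illustration of the copy metadata recorded
# above (values hypothetical). For a rename of 'foo' to 'bar', the new
# 'bar' filelog revision carries the copy source and source revision in
# its metadata, with the first parent forced to nullid as described:
#
#     meta = {"copy": "foo", "copyrev": "4a3b...9c"}
#     fparent1, fparent2 = nullid, newfparent
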
2382 2382 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2383 2383 """check for commit arguments that aren't committable"""
2384 2384 if match.isexact() or match.prefix():
2385 2385 matched = set(status.modified + status.added + status.removed)
2386 2386
2387 2387 for f in match.files():
2388 2388 f = self.dirstate.normalize(f)
2389 2389 if f == '.' or f in matched or f in wctx.substate:
2390 2390 continue
2391 2391 if f in status.deleted:
2392 2392 fail(f, _('file not found!'))
2393 2393 if f in vdirs: # visited directory
2394 2394 d = f + '/'
2395 2395 for mf in matched:
2396 2396 if mf.startswith(d):
2397 2397 break
2398 2398 else:
2399 2399 fail(f, _("no match under directory!"))
2400 2400 elif f not in self.dirstate:
2401 2401 fail(f, _("file not tracked!"))
2402 2402
2403 2403 @unfilteredmethod
2404 2404 def commit(self, text="", user=None, date=None, match=None, force=False,
2405 2405 editor=False, extra=None):
2406 2406 """Add a new revision to current repository.
2407 2407
2408 2408 Revision information is gathered from the working directory;
2409 2409 match can be used to filter the committed files. If editor is
2410 2410 supplied, it is called to get a commit message.
2411 2411 """
2412 2412 if extra is None:
2413 2413 extra = {}
2414 2414
2415 2415 def fail(f, msg):
2416 2416 raise error.Abort('%s: %s' % (f, msg))
2417 2417
2418 2418 if not match:
2419 2419 match = matchmod.always()
2420 2420
2421 2421 if not force:
2422 2422 vdirs = []
2423 2423 match.explicitdir = vdirs.append
2424 2424 match.bad = fail
2425 2425
2426 2426 # lock() for recent changelog (see issue4368)
2427 2427 with self.wlock(), self.lock():
2428 2428 wctx = self[None]
2429 2429 merge = len(wctx.parents()) > 1
2430 2430
2431 2431 if not force and merge and not match.always():
2432 2432 raise error.Abort(_('cannot partially commit a merge '
2433 2433 '(do not specify files or patterns)'))
2434 2434
2435 2435 status = self.status(match=match, clean=force)
2436 2436 if force:
2437 2437 status.modified.extend(status.clean) # mq may commit clean files
2438 2438
2439 2439 # check subrepos
2440 2440 subs, commitsubs, newstate = subrepoutil.precommit(
2441 2441 self.ui, wctx, status, match, force=force)
2442 2442
2443 2443 # make sure all explicit patterns are matched
2444 2444 if not force:
2445 2445 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2446 2446
2447 2447 cctx = context.workingcommitctx(self, status,
2448 2448 text, user, date, extra)
2449 2449
2450 2450 # internal config: ui.allowemptycommit
2451 2451 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2452 2452 or extra.get('close') or merge or cctx.files()
2453 2453 or self.ui.configbool('ui', 'allowemptycommit'))
2454 2454 if not allowemptycommit:
2455 2455 return None
2456 2456
2457 2457 if merge and cctx.deleted():
2458 2458 raise error.Abort(_("cannot commit merge with missing files"))
2459 2459
2460 2460 ms = mergemod.mergestate.read(self)
2461 2461 mergeutil.checkunresolved(ms)
2462 2462
2463 2463 if editor:
2464 2464 cctx._text = editor(self, cctx, subs)
2465 2465 edited = (text != cctx._text)
2466 2466
2467 2467 # Save commit message in case this transaction gets rolled back
2468 2468 # (e.g. by a pretxncommit hook). Leave the content alone on
2469 2469 # the assumption that the user will use the same editor again.
2470 2470 msgfn = self.savecommitmessage(cctx._text)
2471 2471
2472 2472 # commit subs and write new state
2473 2473 if subs:
2474 2474 uipathfn = scmutil.getuipathfn(self)
2475 2475 for s in sorted(commitsubs):
2476 2476 sub = wctx.sub(s)
2477 2477 self.ui.status(_('committing subrepository %s\n') %
2478 2478 uipathfn(subrepoutil.subrelpath(sub)))
2479 2479 sr = sub.commit(cctx._text, user, date)
2480 2480 newstate[s] = (newstate[s][0], sr)
2481 2481 subrepoutil.writestate(self, newstate)
2482 2482
2483 2483 p1, p2 = self.dirstate.parents()
2484 2484 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2485 2485 try:
2486 2486 self.hook("precommit", throw=True, parent1=hookp1,
2487 2487 parent2=hookp2)
2488 2488 with self.transaction('commit'):
2489 2489 ret = self.commitctx(cctx, True)
2490 2490 # update bookmarks, dirstate and mergestate
2491 2491 bookmarks.update(self, [p1, p2], ret)
2492 2492 cctx.markcommitted(ret)
2493 2493 ms.reset()
2494 2494 except: # re-raises
2495 2495 if edited:
2496 2496 self.ui.write(
2497 2497 _('note: commit message saved in %s\n') % msgfn)
2498 2498 raise
2499 2499
2500 2500 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2501 2501 # hack for commands that use a temporary commit (e.g. histedit):
2502 2502 # the temporary commit may have been stripped before the hook runs
2503 2503 if self.changelog.hasnode(ret):
2504 2504 self.hook("commit", node=node, parent1=parent1,
2505 2505 parent2=parent2)
2506 2506 self._afterlock(commithook)
2507 2507 return ret
2508 2508
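# Editor's sketch (hypothetical files and message): a programmatic
# commit restricted to explicit paths goes through a matcher, which is
# exactly what checkcommitpatterns() above validates. scmutil.matchfiles
# is the same helper commitctx() uses further down:
#
#     m = scmutil.matchfiles(repo, [b'a.txt', b'b.txt'])
#     node = repo.commit(text=b'update a and b', user=b'me', match=m)
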
2509 2509 @unfilteredmethod
2510 2510 def commitctx(self, ctx, error=False):
2511 2511 """Add a new revision to current repository.
2512 2512 Revision information is passed via the context argument.
2513 2513
2514 2514 ctx.files() should list all files involved in this commit, i.e.
2515 2515 modified/added/removed files. On merge, it may be wider than the
2516 2516 set of files actually committed, since file nodes derived directly
2517 2517 from p1 or p2 are excluded from the committed ctx.files().
2518 2518 """
2519 2519
2520 2520 p1, p2 = ctx.p1(), ctx.p2()
2521 2521 user = ctx.user()
2522 2522
2523 2523 with self.lock(), self.transaction("commit") as tr:
2524 2524 trp = weakref.proxy(tr)
2525 2525
2526 2526 if ctx.manifestnode():
2527 2527 # reuse an existing manifest revision
2528 2528 self.ui.debug('reusing known manifest\n')
2529 2529 mn = ctx.manifestnode()
2530 2530 files = ctx.files()
2531 2531 elif ctx.files():
2532 2532 m1ctx = p1.manifestctx()
2533 2533 m2ctx = p2.manifestctx()
2534 2534 mctx = m1ctx.copy()
2535 2535
2536 2536 m = mctx.read()
2537 2537 m1 = m1ctx.read()
2538 2538 m2 = m2ctx.read()
2539 2539
2540 2540 # check in files
2541 2541 added = []
2542 2542 changed = []
2543 2543 removed = list(ctx.removed())
2544 2544 linkrev = len(self)
2545 2545 self.ui.note(_("committing files:\n"))
2546 2546 uipathfn = scmutil.getuipathfn(self)
2547 2547 for f in sorted(ctx.modified() + ctx.added()):
2548 2548 self.ui.note(uipathfn(f) + "\n")
2549 2549 try:
2550 2550 fctx = ctx[f]
2551 2551 if fctx is None:
2552 2552 removed.append(f)
2553 2553 else:
2554 2554 added.append(f)
2555 2555 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2556 2556 trp, changed)
2557 2557 m.setflag(f, fctx.flags())
2558 2558 except OSError:
2559 2559 self.ui.warn(_("trouble committing %s!\n") %
2560 2560 uipathfn(f))
2561 2561 raise
2562 2562 except IOError as inst:
2563 2563 errcode = getattr(inst, 'errno', errno.ENOENT)
2564 2564 if error or errcode and errcode != errno.ENOENT:
2565 2565 self.ui.warn(_("trouble committing %s!\n") %
2566 2566 uipathfn(f))
2567 2567 raise
2568 2568
2569 2569 # update manifest
2570 2570 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2571 2571 drop = [f for f in removed if f in m]
2572 2572 for f in drop:
2573 2573 del m[f]
2574 2574 files = changed + removed
2575 2575 md = None
2576 2576 if not files:
2577 2577 # if no "files" actually changed in terms of the changelog,
2578 2578 # try hard to detect an unmodified manifest entry so that the
2579 2579 # exact same commit can be reproduced later on convert.
2580 2580 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2581 2581 if not files and md:
2582 2582 self.ui.debug('not reusing manifest (no file change in '
2583 2583 'changelog, but manifest differs)\n')
2584 2584 if files or md:
2585 2585 self.ui.note(_("committing manifest\n"))
2586 2586 # we're using narrowmatch here since it's already applied at
2587 2587 # other stages (such as dirstate.walk), so we're already
2588 2588 # ignoring things outside of narrowspec in most cases. The
2589 2589 # one case where we might have files outside the narrowspec
2590 2590 # at this point is merges, and we already error out in the
2591 2591 # case where the merge has files outside of the narrowspec,
2592 2592 # so this is safe.
2593 2593 mn = mctx.write(trp, linkrev,
2594 2594 p1.manifestnode(), p2.manifestnode(),
2595 2595 added, drop, match=self.narrowmatch())
2596 2596 else:
2597 2597 self.ui.debug('reusing manifest from p1 (listed files '
2598 2598 'actually unchanged)\n')
2599 2599 mn = p1.manifestnode()
2600 2600 else:
2601 2601 self.ui.debug('reusing manifest from p1 (no file change)\n')
2602 2602 mn = p1.manifestnode()
2603 2603 files = []
2604 2604
2605 2605 # update changelog
2606 2606 self.ui.note(_("committing changelog\n"))
2607 2607 self.changelog.delayupdate(tr)
2608 2608 n = self.changelog.add(mn, files, ctx.description(),
2609 2609 trp, p1.node(), p2.node(),
2610 2610 user, ctx.date(), ctx.extra().copy())
2611 2611 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2612 2612 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2613 2613 parent2=xp2)
2614 2614 # set the new commit's proper phase
2615 2615 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2616 2616 if targetphase:
2617 2617 # retracting the boundary does not alter parent changesets.
2618 2618 # if a parent has a higher phase, the resulting phase will
2619 2619 # be compliant anyway
2620 2620 #
2621 2621 # if minimal phase was 0 we don't need to retract anything
2622 2622 phases.registernew(self, tr, targetphase, [n])
2623 2623 return n
2624 2624
2625 2625 @unfilteredmethod
2626 2626 def destroying(self):
2627 2627 '''Inform the repository that nodes are about to be destroyed.
2628 2628 Intended for use by strip and rollback, so there's a common
2629 2629 place for anything that has to be done before destroying history.
2630 2630
2631 2631 This is mostly useful for saving state that is in memory and waiting
2632 2632 to be flushed when the current lock is released. Because a call to
2633 2633 destroyed is imminent, the repo will be invalidated, causing those
2634 2634 changes either to stay in memory (waiting for the next unlock) or to
2635 2635 vanish completely.
2636 2636 '''
2637 2637 # When using the same lock to commit and strip, the phasecache is left
2638 2638 # dirty after committing. Then when we strip, the repo is invalidated,
2639 2639 # causing those changes to disappear.
2640 2640 if '_phasecache' in vars(self):
2641 2641 self._phasecache.write()
2642 2642
2643 2643 @unfilteredmethod
2644 2644 def destroyed(self):
2645 2645 '''Inform the repository that nodes have been destroyed.
2646 2646 Intended for use by strip and rollback, so there's a common
2647 2647 place for anything that has to be done after destroying history.
2648 2648 '''
2649 2649 # When one tries to:
2650 2650 # 1) destroy nodes thus calling this method (e.g. strip)
2651 2651 # 2) use phasecache somewhere (e.g. commit)
2652 2652 #
2653 2653 # then 2) will fail because the phasecache contains nodes that were
2654 2654 # removed. We can either remove phasecache from the filecache,
2655 2655 # causing it to reload next time it is accessed, or simply filter
2656 2656 # the removed nodes now and write the updated cache.
2657 2657 self._phasecache.filterunknown(self)
2658 2658 self._phasecache.write()
2659 2659
2660 2660 # refresh all repository caches
2661 2661 self.updatecaches()
2662 2662
2663 2663 # Ensure the persistent tag cache is updated. Doing it now
2664 2664 # means that the tag cache only has to worry about destroyed
2665 2665 # heads immediately after a strip/rollback. That in turn
2666 2666 # guarantees that "cachetip == currenttip" (comparing both rev
2667 2667 # and node) always means no nodes have been added or destroyed.
2668 2668
2669 2669 # XXX this is suboptimal when qrefresh'ing: we strip the current
2670 2670 # head, refresh the tag cache, then immediately add a new head.
2671 2671 # But I think doing it this way is necessary for the "instant
2672 2672 # tag cache retrieval" case to work.
2673 2673 self.invalidate()
2674 2674
2675 2675 def status(self, node1='.', node2=None, match=None,
2676 2676 ignored=False, clean=False, unknown=False,
2677 2677 listsubrepos=False):
2678 2678 '''a convenience method that calls node1.status(node2)'''
2679 2679 return self[node1].status(node2, match, ignored, clean, unknown,
2680 2680 listsubrepos)
2681 2681
2682 2682 def addpostdsstatus(self, ps):
2683 2683 """Add a callback to run within the wlock, at the point at which status
2684 2684 fixups happen.
2685 2685
2686 2686 On status completion, callback(wctx, status) will be called with the
2687 2687 wlock held, unless the dirstate has changed from underneath or the wlock
2688 2688 couldn't be grabbed.
2689 2689
2690 2690 Callbacks should not capture and use a cached copy of the dirstate --
2691 2691 it might change in the meanwhile. Instead, they should access the
2692 2692 dirstate via wctx.repo().dirstate.
2693 2693
2694 2694 This list is emptied out after each status run -- extensions should
2695 2695 make sure they add to this list each time dirstate.status is called.
2696 2696 Extensions should also make sure they don't call this for statuses
2697 2697 that don't involve the dirstate.
2698 2698 """
2699 2699
2700 2700 # The list is located here for uniqueness reasons -- it is actually
2701 2701 # managed by the workingctx, but that isn't unique per-repo.
2702 2702 self._postdsstatus.append(ps)
2703 2703
2704 2704 def postdsstatus(self):
2705 2705 """Used by workingctx to get the list of post-dirstate-status hooks."""
2706 2706 return self._postdsstatus
2707 2707
2708 2708 def clearpostdsstatus(self):
2709 2709 """Used by workingctx to clear post-dirstate-status hooks."""
2710 2710 del self._postdsstatus[:]
2711 2711
2712 2712 def heads(self, start=None):
2713 2713 if start is None:
2714 2714 cl = self.changelog
2715 2715 headrevs = reversed(cl.headrevs())
2716 2716 return [cl.node(rev) for rev in headrevs]
2717 2717
2718 2718 heads = self.changelog.heads(start)
2719 2719 # sort the output in rev descending order
2720 2720 return sorted(heads, key=self.changelog.rev, reverse=True)
2721 2721
2722 2722 def branchheads(self, branch=None, start=None, closed=False):
2723 2723 '''return a (possibly filtered) list of heads for the given branch
2724 2724
2725 2725 Heads are returned in topological order, from newest to oldest.
2726 2726 If branch is None, use the dirstate branch.
2727 2727 If start is not None, return only heads reachable from start.
2728 2728 If closed is True, return heads that are marked as closed as well.
2729 2729 '''
2730 2730 if branch is None:
2731 2731 branch = self[None].branch()
2732 2732 branches = self.branchmap()
2733 if branch not in branches:
2733 if branch not in branches.entries:
2734 2734 return []
2735 2735 # the cache returns heads ordered lowest to highest
2736 2736 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2737 2737 if start is not None:
2738 2738 # filter out the heads that cannot be reached from startrev
2739 2739 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2740 2740 bheads = [h for h in bheads if h in fbheads]
2741 2741 return bheads
2742 2742
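# Editor's sketch of the API change above (hypothetical branch name):
# with the dict interface removed from branchcache, membership tests go
# through its 'entries' mapping rather than the cache object itself:
#
#     branches = repo.branchmap()
#     if b'default' in branches.entries:
#         heads = branches.branchheads(b'default', closed=False)
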
2743 2743 def branches(self, nodes):
2744 2744 if not nodes:
2745 2745 nodes = [self.changelog.tip()]
2746 2746 b = []
2747 2747 for n in nodes:
2748 2748 t = n
2749 2749 while True:
2750 2750 p = self.changelog.parents(n)
2751 2751 if p[1] != nullid or p[0] == nullid:
2752 2752 b.append((t, n, p[0], p[1]))
2753 2753 break
2754 2754 n = p[0]
2755 2755 return b
2756 2756
2757 2757 def between(self, pairs):
2758 2758 r = []
2759 2759
2760 2760 for top, bottom in pairs:
2761 2761 n, l, i = top, [], 0
2762 2762 f = 1
2763 2763
2764 2764 while n != bottom and n != nullid:
2765 2765 p = self.changelog.parents(n)[0]
2766 2766 if i == f:
2767 2767 l.append(n)
2768 2768 f = f * 2
2769 2769 n = p
2770 2770 i += 1
2771 2771
2772 2772 r.append(l)
2773 2773
2774 2774 return r
2775 2775
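# Editor's note: between() samples each first-parent chain from 'top'
# toward 'bottom' at exponentially growing distances (1, 2, 4, 8, ...
# steps), which lets the legacy wire protocol narrow down common
# ancestors in a logarithmic number of round trips. A hedged sketch of
# the same sampling on plain integers instead of changelog nodes:
#
#     def sample(top, bottom):
#         n, l, i, f = top, [], 0, 1
#         while n != bottom:
#             n -= 1  # stand-in for following the first parent
#             i += 1
#             if i == f:
#                 l.append(n)
#                 f *= 2
#         return l
#
#     sample(10, 0)  # -> [9, 8, 6, 2]: 1, 2, 4 and 8 steps from 10
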
2776 2776 def checkpush(self, pushop):
2777 2777 """Extensions can override this function if additional checks have
2778 2778 to be performed before pushing, or call it if they override push
2779 2779 command.
2780 2780 """
2781 2781
2782 2782 @unfilteredpropertycache
2783 2783 def prepushoutgoinghooks(self):
2784 2784 """Return util.hooks consists of a pushop with repo, remote, outgoing
2785 2785 methods, which are called before pushing changesets.
2786 2786 """
2787 2787 return util.hooks()
2788 2788
2789 2789 def pushkey(self, namespace, key, old, new):
2790 2790 try:
2791 2791 tr = self.currenttransaction()
2792 2792 hookargs = {}
2793 2793 if tr is not None:
2794 2794 hookargs.update(tr.hookargs)
2795 2795 hookargs = pycompat.strkwargs(hookargs)
2796 2796 hookargs[r'namespace'] = namespace
2797 2797 hookargs[r'key'] = key
2798 2798 hookargs[r'old'] = old
2799 2799 hookargs[r'new'] = new
2800 2800 self.hook('prepushkey', throw=True, **hookargs)
2801 2801 except error.HookAbort as exc:
2802 2802 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2803 2803 if exc.hint:
2804 2804 self.ui.write_err(_("(%s)\n") % exc.hint)
2805 2805 return False
2806 2806 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2807 2807 ret = pushkey.push(self, namespace, key, old, new)
2808 2808 def runhook():
2809 2809 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2810 2810 ret=ret)
2811 2811 self._afterlock(runhook)
2812 2812 return ret
2813 2813
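# Editor's sketch (hypothetical values): pushkey() is the generic
# mutation entry point used by the wire protocol. Moving a bookmark,
# for example, goes through the 'bookmarks' namespace with hex nodes as
# the old and new values (an empty old value requests creation):
#
#     ok = repo.pushkey(b'bookmarks', b'feature', b'', newnodehex)
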
2814 2814 def listkeys(self, namespace):
2815 2815 self.hook('prelistkeys', throw=True, namespace=namespace)
2816 2816 self.ui.debug('listing keys for "%s"\n' % namespace)
2817 2817 values = pushkey.list(self, namespace)
2818 2818 self.hook('listkeys', namespace=namespace, values=values)
2819 2819 return values
2820 2820
2821 2821 def debugwireargs(self, one, two, three=None, four=None, five=None):
2822 2822 '''used to test argument passing over the wire'''
2823 2823 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2824 2824 pycompat.bytestr(four),
2825 2825 pycompat.bytestr(five))
2826 2826
2827 2827 def savecommitmessage(self, text):
2828 2828 fp = self.vfs('last-message.txt', 'wb')
2829 2829 try:
2830 2830 fp.write(text)
2831 2831 finally:
2832 2832 fp.close()
2833 2833 return self.pathto(fp.name[len(self.root) + 1:])
2834 2834
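# Editor's note: the message is written to .hg/last-message.txt and the
# return value is that path made printable via self.pathto(); output
# below is assumed and depends on the current working directory:
#
#     repo.savecommitmessage(b'WIP')  # -> b'.hg/last-message.txt'
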
2835 2835 # used to avoid circular references so destructors work
2836 2836 def aftertrans(files):
2837 2837 renamefiles = [tuple(t) for t in files]
2838 2838 def a():
2839 2839 for vfs, src, dest in renamefiles:
2840 2840 # if src and dest refer to a same file, vfs.rename is a no-op,
2841 2841 # leaving both src and dest on disk. delete dest to make sure
2842 2842 # the rename couldn't be such a no-op.
2843 2843 vfs.tryunlink(dest)
2844 2844 try:
2845 2845 vfs.rename(src, dest)
2846 2846 except OSError: # journal file does not yet exist
2847 2847 pass
2848 2848 return a
2849 2849
2850 2850 def undoname(fn):
2851 2851 base, name = os.path.split(fn)
2852 2852 assert name.startswith('journal')
2853 2853 return os.path.join(base, name.replace('journal', 'undo', 1))
2854 2854
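# Editor's note: concrete instances of the mapping above:
#
#     undoname(b'.hg/store/journal')     # -> b'.hg/store/undo'
#     undoname(b'.hg/journal.dirstate')  # -> b'.hg/undo.dirstate'
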
2855 2855 def instance(ui, path, create, intents=None, createopts=None):
2856 2856 localpath = util.urllocalpath(path)
2857 2857 if create:
2858 2858 createrepository(ui, localpath, createopts=createopts)
2859 2859
2860 2860 return makelocalrepository(ui, localpath, intents=intents)
2861 2861
2862 2862 def islocal(path):
2863 2863 return True
2864 2864
2865 2865 def defaultcreateopts(ui, createopts=None):
2866 2866 """Populate the default creation options for a repository.
2867 2867
2868 2868 A dictionary of explicitly requested creation options can be passed
2869 2869 in. Missing keys will be populated.
2870 2870 """
2871 2871 createopts = dict(createopts or {})
2872 2872
2873 2873 if 'backend' not in createopts:
2874 2874 # experimental config: storage.new-repo-backend
2875 2875 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2876 2876
2877 2877 return createopts
2878 2878
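# Editor's sketch: with a stock configuration the only key this fills
# in is the backend ('revlogv1' is the assumed registered default):
#
#     opts = defaultcreateopts(ui, createopts={'narrowfiles': True})
#     # opts == {'narrowfiles': True, 'backend': 'revlogv1'}
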
2879 2879 def newreporequirements(ui, createopts):
2880 2880 """Determine the set of requirements for a new local repository.
2881 2881
2882 2882 Extensions can wrap this function to specify custom requirements for
2883 2883 new repositories.
2884 2884 """
2885 2885 # If the repo is being created from a shared repository, we copy
2886 2886 # its requirements.
2887 2887 if 'sharedrepo' in createopts:
2888 2888 requirements = set(createopts['sharedrepo'].requirements)
2889 2889 if createopts.get('sharedrelative'):
2890 2890 requirements.add('relshared')
2891 2891 else:
2892 2892 requirements.add('shared')
2893 2893
2894 2894 return requirements
2895 2895
2896 2896 if 'backend' not in createopts:
2897 2897 raise error.ProgrammingError('backend key not present in createopts; '
2898 2898 'was defaultcreateopts() called?')
2899 2899
2900 2900 if createopts['backend'] != 'revlogv1':
2901 2901 raise error.Abort(_('unable to determine repository requirements for '
2902 2902 'storage backend: %s') % createopts['backend'])
2903 2903
2904 2904 requirements = {'revlogv1'}
2905 2905 if ui.configbool('format', 'usestore'):
2906 2906 requirements.add('store')
2907 2907 if ui.configbool('format', 'usefncache'):
2908 2908 requirements.add('fncache')
2909 2909 if ui.configbool('format', 'dotencode'):
2910 2910 requirements.add('dotencode')
2911 2911
2912 2912 compengine = ui.config('experimental', 'format.compression')
2913 2913 if compengine not in util.compengines:
2914 2914 raise error.Abort(_('compression engine %s defined by '
2915 2915 'experimental.format.compression not available') %
2916 2916 compengine,
2917 2917 hint=_('run "hg debuginstall" to list available '
2918 2918 'compression engines'))
2919 2919
2920 2920 # zlib is the historical default and doesn't need an explicit requirement.
2921 2921 if compengine != 'zlib':
2922 2922 requirements.add('exp-compression-%s' % compengine)
2923 2923
2924 2924 if scmutil.gdinitconfig(ui):
2925 2925 requirements.add('generaldelta')
2926 2926 if ui.configbool('format', 'sparse-revlog'):
2927 2927 requirements.add(SPARSEREVLOG_REQUIREMENT)
2928 2928 if ui.configbool('experimental', 'treemanifest'):
2929 2929 requirements.add('treemanifest')
2930 2930
2931 2931 revlogv2 = ui.config('experimental', 'revlogv2')
2932 2932 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2933 2933 requirements.remove('revlogv1')
2934 2934 # generaldelta is implied by revlogv2.
2935 2935 requirements.discard('generaldelta')
2936 2936 requirements.add(REVLOGV2_REQUIREMENT)
2937 2937 # experimental config: format.internal-phase
2938 2938 if ui.configbool('format', 'internal-phase'):
2939 2939 requirements.add('internal-phase')
2940 2940
2941 2941 if createopts.get('narrowfiles'):
2942 2942 requirements.add(repository.NARROW_REQUIREMENT)
2943 2943
2944 2944 if createopts.get('lfs'):
2945 2945 requirements.add('lfs')
2946 2946
2947 2947 return requirements
2948 2948
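# Editor's sketch: under default format options the returned set is
# typically, depending on configuration, something like:
#
#     {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta',
#      'sparserevlog'}
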
2949 2949 def filterknowncreateopts(ui, createopts):
2950 2950 """Filters a dict of repo creation options against options that are known.
2951 2951
2952 2952 Receives a dict of repo creation options and returns a dict of those
2953 2953 options that we don't know how to handle.
2954 2954
2955 2955 This function is called as part of repository creation. If the
2956 2956 returned dict contains any items, repository creation will not
2957 2957 be allowed, as it means there was a request to create a repository
2958 2958 with options not recognized by loaded code.
2959 2959
2960 2960 Extensions can wrap this function to filter out creation options
2961 2961 they know how to handle.
2962 2962 """
2963 2963 known = {
2964 2964 'backend',
2965 2965 'lfs',
2966 2966 'narrowfiles',
2967 2967 'sharedrepo',
2968 2968 'sharedrelative',
2969 2969 'shareditems',
2970 2970 'shallowfilestore',
2971 2971 }
2972 2972
2973 2973 return {k: v for k, v in createopts.items() if k not in known}
2974 2974
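# Editor's sketch ('frobnicate' is a hypothetical option): anything not
# in 'known' is returned, which createrepository() below turns into an
# abort:
#
#     filterknowncreateopts(ui, {'backend': 'revlogv1', 'frobnicate': 1})
#     # -> {'frobnicate': 1}
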
2975 2975 def createrepository(ui, path, createopts=None):
2976 2976 """Create a new repository in a vfs.
2977 2977
2978 2978 ``path`` path to the new repo's working directory.
2979 2979 ``createopts`` options for the new repository.
2980 2980
2981 2981 The following keys for ``createopts`` are recognized:
2982 2982
2983 2983 backend
2984 2984 The storage backend to use.
2985 2985 lfs
2986 2986 Repository will be created with ``lfs`` requirement. The lfs extension
2987 2987 will automatically be loaded when the repository is accessed.
2988 2988 narrowfiles
2989 2989 Set up repository to support narrow file storage.
2990 2990 sharedrepo
2991 2991 Repository object from which storage should be shared.
2992 2992 sharedrelative
2993 2993 Boolean indicating if the path to the shared repo should be
2994 2994 stored as relative. By default, the pointer to the "parent" repo
2995 2995 is stored as an absolute path.
2996 2996 shareditems
2997 2997 Set of items to share to the new repository (in addition to storage).
2998 2998 shallowfilestore
2999 2999 Indicates that storage for files should be shallow (not all ancestor
3000 3000 revisions are known).
3001 3001 """
3002 3002 createopts = defaultcreateopts(ui, createopts=createopts)
3003 3003
3004 3004 unknownopts = filterknowncreateopts(ui, createopts)
3005 3005
3006 3006 if not isinstance(unknownopts, dict):
3007 3007 raise error.ProgrammingError('filterknowncreateopts() did not return '
3008 3008 'a dict')
3009 3009
3010 3010 if unknownopts:
3011 3011 raise error.Abort(_('unable to create repository because of unknown '
3012 3012 'creation option: %s') %
3013 3013 ', '.join(sorted(unknownopts)),
3014 3014 hint=_('is a required extension not loaded?'))
3015 3015
3016 3016 requirements = newreporequirements(ui, createopts=createopts)
3017 3017
3018 3018 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3019 3019
3020 3020 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3021 3021 if hgvfs.exists():
3022 3022 raise error.RepoError(_('repository %s already exists') % path)
3023 3023
3024 3024 if 'sharedrepo' in createopts:
3025 3025 sharedpath = createopts['sharedrepo'].sharedpath
3026 3026
3027 3027 if createopts.get('sharedrelative'):
3028 3028 try:
3029 3029 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3030 3030 except (IOError, ValueError) as e:
3031 3031 # ValueError is raised on Windows if the drive letters differ
3032 3032 # on each path.
3033 3033 raise error.Abort(_('cannot calculate relative path'),
3034 3034 hint=stringutil.forcebytestr(e))
3035 3035
3036 3036 if not wdirvfs.exists():
3037 3037 wdirvfs.makedirs()
3038 3038
3039 3039 hgvfs.makedir(notindexed=True)
3040 3040 if 'sharedrepo' not in createopts:
3041 3041 hgvfs.mkdir(b'cache')
3042 3042 hgvfs.mkdir(b'wcache')
3043 3043
3044 3044 if b'store' in requirements and 'sharedrepo' not in createopts:
3045 3045 hgvfs.mkdir(b'store')
3046 3046
3047 3047 # We create an invalid changelog outside the store so very old
3048 3048 # Mercurial versions (which didn't know about the requirements
3049 3049 # file) encounter an error on reading the changelog. This
3050 3050 # effectively locks out old clients and prevents them from
3051 3051 # mucking with a repo in an unknown format.
3052 3052 #
3053 3053 # The revlog header has version 2, which won't be recognized by
3054 3054 # such old clients.
3055 3055 hgvfs.append(b'00changelog.i',
3056 3056 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3057 3057 b'layout')
3058 3058
3059 3059 scmutil.writerequires(hgvfs, requirements)
3060 3060
3061 3061 # Write out file telling readers where to find the shared store.
3062 3062 if 'sharedrepo' in createopts:
3063 3063 hgvfs.write(b'sharedpath', sharedpath)
3064 3064
3065 3065 if createopts.get('shareditems'):
3066 3066 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3067 3067 hgvfs.write(b'shared', shared)
3068 3068
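# Editor's sketch (hypothetical path): creating a new LFS-enabled
# repository and then opening it through instance() defined above:
#
#     createrepository(ui, b'/tmp/newrepo', createopts={'lfs': True})
#     repo = instance(ui, b'/tmp/newrepo', create=False)
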
3069 3069 def poisonrepository(repo):
3070 3070 """Poison a repository instance so it can no longer be used."""
3071 3071 # Perform any cleanup on the instance.
3072 3072 repo.close()
3073 3073
3074 3074 # Our strategy is to replace the type of the object with one that
3075 3075 # has all attribute lookups result in error.
3076 3076 #
3077 3077 # But we have to allow the close() method because some constructors
3078 3078 # of repos call close() on repo references.
3079 3079 class poisonedrepository(object):
3080 3080 def __getattribute__(self, item):
3081 3081 if item == r'close':
3082 3082 return object.__getattribute__(self, item)
3083 3083
3084 3084 raise error.ProgrammingError('repo instances should not be used '
3085 3085 'after unshare')
3086 3086
3087 3087 def close(self):
3088 3088 pass
3089 3089
3090 3090 # We may have a repoview, which intercepts __setattr__. So be sure
3091 3091 # we operate at the lowest level possible.
3092 3092 object.__setattr__(repo, r'__class__', poisonedrepository)
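# Editor's sketch: after poisoning, only close() remains callable; any
# other attribute access surfaces use-after-unshare bugs immediately:
#
#     poisonrepository(repo)
#     repo.close()      # still allowed
#     repo.changelog    # raises error.ProgrammingError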