branchmap: remove the dict interface from the branchcache class (API)...
Pulkit Goyal
r42168:624d6683 default
--- a/mercurial/branchmap.py
+++ b/mercurial/branchmap.py
@@ -1,600 +1,618 @@
 # branchmap.py - logic to compute, maintain and store branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import struct

 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
 )
 from . import (
     encoding,
     error,
     pycompat,
     scmutil,
     util,
 )
 from .utils import (
     stringutil,
 )

 calcsize = struct.calcsize
 pack_into = struct.pack_into
 unpack_from = struct.unpack_from


 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
 # * Y is included in X,
 # * X - Y is as small as possible.
 # This creates an ordering used for branchmap purposes.
 # The ordering may be partial.
 subsettable = {None: 'visible',
                'visible-hidden': 'visible',
                'visible': 'served',
                'served': 'immutable',
                'immutable': 'base'}
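To see how this table is consumed: resolving a filter name through subsettable and repeating the lookup walks toward 'base'. A minimal sketch of that walk (the helper name is illustrative, not part of this change):

    def nearest_subsets(filtername):
        # Follow the subsettable chain from a filter name down to 'base'.
        chain = []
        name = subsettable.get(filtername)
        while name is not None:
            chain.append(name)
            name = subsettable.get(name)
        return chain

    # nearest_subsets(None) -> ['visible', 'served', 'immutable', 'base']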


 class BranchMapCache(object):
     """mapping of filtered views of repo with their branchcache"""
     def __init__(self):
         self._per_filter = {}

     def __getitem__(self, repo):
         self.updatecache(repo)
         return self._per_filter[repo.filtername]

     def updatecache(self, repo):
         """Update the cache for the given filtered view on a repository"""
         # This can trigger updates for the caches for subsets of the filtered
         # view, e.g. when there is no cache for this filtered view or the cache
         # is stale.

         cl = repo.changelog
         filtername = repo.filtername
         bcache = self._per_filter.get(filtername)
         if bcache is None or not bcache.validfor(repo):
             # cache object missing or cache object stale? Read from disk
             bcache = branchcache.fromfile(repo)

         revs = []
         if bcache is None:
             # no (fresh) cache available anymore, perhaps we can re-use
             # the cache for a subset, then extend that to add info on missing
             # revisions.
             subsetname = subsettable.get(filtername)
             if subsetname is not None:
                 subset = repo.filtered(subsetname)
                 bcache = self[subset].copy()
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
             else:
                 # nothing to fall back on, start empty.
                 bcache = branchcache()

         revs.extend(cl.revs(start=bcache.tiprev + 1))
         if revs:
             bcache.update(repo, revs)

         assert bcache.validfor(repo), filtername
         self._per_filter[repo.filtername] = bcache

     def replace(self, repo, remotebranchmap):
         """Replace the branchmap cache for a repo with a branch mapping.

         This is likely only called during clone with a branch map from a
         remote.

         """
         cl = repo.changelog
         clrev = cl.rev
         clbranchinfo = cl.branchinfo
         rbheads = []
         closed = []
         for bheads in remotebranchmap.itervalues():
             rbheads += bheads
             for h in bheads:
                 r = clrev(h)
                 b, c = clbranchinfo(r)
                 if c:
                     closed.append(h)

         if rbheads:
             rtiprev = max((int(clrev(node)) for node in rbheads))
             cache = branchcache(
                 remotebranchmap, repo[rtiprev].node(), rtiprev,
                 closednodes=closed)

             # Try to stick it as low as possible
             # filters above served are unlikely to be fetched from a clone
             for candidate in ('base', 'immutable', 'served'):
                 rview = repo.filtered(candidate)
                 if cache.validfor(rview):
                     self._per_filter[candidate] = cache
                     cache.write(rview)
                     return

     def clear(self):
         self._per_filter.clear()


-class branchcache(dict):
+class branchcache(object):
     """A dict-like object that holds the branch heads cache.

     This cache is used to avoid costly computations to determine all the
     branch heads of a repo.

     The cache is serialized on disk in the following format:

     <tip hex node> <tip rev number> [optional filtered repo hex hash]
     <branch head hex node> <open/closed state> <branch name>
     <branch head hex node> <open/closed state> <branch name>
     ...

     The first line is used to check if the cache is still valid. If the
     branch cache is for a filtered repo view, an optional third hash is
     included that hashes the hashes of all filtered revisions.

     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
     branch head closes a branch or not.
     """

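To make the format concrete, a branch2 cache file with this layout could read as follows (all hashes invented for illustration):

    63243ac487b18e9481059889f1193a428901e48c 32 f6fc62dde3c0771e29704af56ba4d8af77abcc2f
    b9154636be938d3d431e75a7c906504a079bfe07 o default
    aa14cbbd7a66316fd4ab8f6bcb458f5cd1302169 c stable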
     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None, closednodes=None):
-        super(branchcache, self).__init__(entries)
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
         if closednodes is None:
             self._closednodes = set()
         else:
             self._closednodes = closednodes
+        self.entries = dict(entries)
+
+    def __iter__(self):
+        return iter(self.entries)
+
+    def __setitem__(self, key, value):
+        self.entries[key] = value
+
+    def __getitem__(self, key):
+        return self.entries[key]
+
+    def setdefault(self, *args):
+        return self.entries.setdefault(*args)
+
+    def iteritems(self):
+        return self.entries.iteritems()
+
+    def itervalues(self):
+        return self.entries.itervalues()
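With the dict base class gone, callers interact with the cache only through these explicit accessors (and the query methods further down). A hedged sketch of the resulting call pattern, assuming a repository object repo whose branchmap() returns this class:

    def openheads(repo):
        # Map each branch name to its open heads, using only the
        # accessors branchcache still exposes (illustrative only).
        bcache = repo.branchmap()
        return {branch: bcache.branchheads(branch) for branch in bcache}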

     @classmethod
     def fromfile(cls, repo):
         f = None
         try:
             f = repo.cachevfs(cls._filename(repo))
             lineiter = iter(f)
             cachekey = next(lineiter).rstrip('\n').split(" ", 2)
             last, lrev = cachekey[:2]
             last, lrev = bin(last), int(lrev)
             filteredhash = None
             if len(cachekey) > 2:
                 filteredhash = bin(cachekey[2])
             bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
             if not bcache.validfor(repo):
                 # invalidate the cache
                 raise ValueError(r'tip differs')
             bcache.load(repo, lineiter)
         except (IOError, OSError):
             return None

         except Exception as inst:
             if repo.ui.debugflag:
                 msg = 'invalid branchheads cache'
                 if repo.filtername is not None:
                     msg += ' (%s)' % repo.filtername
                 msg += ': %s\n'
                 repo.ui.debug(msg % pycompat.bytestr(inst))
             bcache = None

         finally:
             if f:
                 f.close()

         return bcache

     def load(self, repo, lineiter):
         """fully loads the branchcache by reading from the file using the line
         iterator passed"""
         cl = repo.changelog
         for line in lineiter:
             line = line.rstrip('\n')
             if not line:
                 continue
             node, state, label = line.split(" ", 2)
             if state not in 'oc':
                 raise ValueError(r'invalid branch state')
             label = encoding.tolocal(label.strip())
             node = bin(node)
             if not cl.hasnode(node):
                 raise ValueError(
                     r'node %s does not exist' % pycompat.sysstr(hex(node)))
             self.setdefault(label, []).append(node)
             if state == 'c':
                 self._closednodes.add(node)

     @staticmethod
     def _filename(repo):
         """name of a branchcache file for a given repo or repoview"""
         filename = "branch2"
         if repo.filtername:
             filename = '%s-%s' % (filename, repo.filtername)
         return filename

     def validfor(self, repo):
         """Is the cache content valid regarding a repo

         - False when cached tipnode is unknown or if we detect a strip.
         - True when cache is up to date or a subset of current repo."""
         try:
             return ((self.tipnode == repo.changelog.node(self.tiprev))
                     and (self.filteredhash ==
                          scmutil.filteredhash(repo, self.tiprev)))
         except IndexError:
             return False

     def _branchtip(self, heads):
         '''Return tuple with last open head in heads and false,
         otherwise return last closed head and true.'''
         tip = heads[-1]
         closed = True
         for h in reversed(heads):
             if h not in self._closednodes:
                 tip = h
                 closed = False
                 break
         return tip, closed

     def branchtip(self, branch):
         '''Return the tipmost open head on branch head, otherwise return the
         tipmost closed head on branch.
         Raise KeyError for unknown branch.'''
         return self._branchtip(self[branch])[0]

     def iteropen(self, nodes):
         return (n for n in nodes if n not in self._closednodes)

     def branchheads(self, branch, closed=False):
         heads = self[branch]
         if not closed:
             heads = list(self.iteropen(heads))
         return heads

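For example, comparing open heads against all heads of one branch only requires the closed flag (a small illustrative helper, not part of this change):

    def headsummary(cache, branch):
        # branchheads() filters out closed heads unless closed=True.
        openheads = cache.branchheads(branch)
        allheads = cache.branchheads(branch, closed=True)
        return len(openheads), len(allheads)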
     def iterbranches(self):
         for bn, heads in self.iteritems():
             yield (bn, heads) + self._branchtip(heads)

     def copy(self):
         """return a deep copy of the branchcache object"""
-        return type(self)(
-            self, self.tipnode, self.tiprev, self.filteredhash,
+        return branchcache(
+            self.entries, self.tipnode, self.tiprev, self.filteredhash,
             self._closednodes)

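Note one consequence of switching copy() from type(self) to branchcache: copying a remotebranchcache (defined below) now yields a plain branchcache, whose write() is not a no-op. A minimal sketch:

    rcache = remotebranchcache()
    assert type(rcache.copy()) is branchcache  # no longer remotebranchcache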
     def write(self, repo):
         try:
             f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), '%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 label = encoding.fromlocal(label)
                 for node in nodes:
                     nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
                         state = 'o'
                     f.write("%s %s %s\n" % (hex(node), state, label))
             f.close()
             repo.ui.log('branchcache',
                         'wrote %s branch cache with %d labels and %d nodes\n',
-                        repo.filtername, len(self), nodecount)
+                        repo.filtername, len(self.entries), nodecount)
         except (IOError, OSError, error.Abort) as inst:
             # Abort may be raised by a read-only opener, so log and continue
             repo.ui.debug("couldn't write branch cache: %s\n" %
                           stringutil.forcebytestr(inst))

     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
         starttime = util.timer()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
         getbranchinfo = repo.revbranchcache().branchinfo
         for r in revgen:
             branch, closesbranch = getbranchinfo(r)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
                 self._closednodes.add(cl.node(r))

         # fetch current topological heads to speed up filtering
         topoheads = set(cl.headrevs())

         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newheadrevs in newbranches.iteritems():
             bheads = self.setdefault(branch, [])
             bheadset = set(cl.rev(node) for node in bheads)

             # This has been tested True on all internal usage of this function.
             # run it again in case of doubt
             # assert not (set(bheadrevs) & set(newheadrevs))
             bheadset.update(newheadrevs)

             # This prunes out two kinds of heads - heads that are superseded by
             # a head in newheadrevs, and newheadrevs that are not heads because
             # an existing head is their descendant.
             uncertain = bheadset - topoheads
             if uncertain:
                 floorrev = min(uncertain)
                 ancestors = set(cl.ancestors(newheadrevs, floorrev))
                 bheadset -= ancestors
             bheadrevs = sorted(bheadset)
             self[branch] = [cl.node(rev) for rev in bheadrevs]
             tiprev = bheadrevs[-1]
             if tiprev > self.tiprev:
                 self.tipnode = cl.node(tiprev)
                 self.tiprev = tiprev

         if not self.validfor(repo):
             # cache keys are not valid anymore
             self.tipnode = nullid
             self.tiprev = nullrev
-            for heads in self.values():
+            for heads in self.itervalues():
                 tiprev = max(cl.rev(node) for node in heads)
                 if tiprev > self.tiprev:
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
         self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

         duration = util.timer() - starttime
         repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                     repo.filtername or b'None', duration)

         self.write(repo)


 class remotebranchcache(branchcache):
     """Branchmap info for a remote connection, should not write locally"""
     def write(self, repo):
         pass


 # Revision branch info cache

 _rbcversion = '-v1'
 _rbcnames = 'rbc-names' + _rbcversion
 _rbcrevs = 'rbc-revs' + _rbcversion
 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
 _rbcrecfmt = '>4sI'
 _rbcrecsize = calcsize(_rbcrecfmt)
 _rbcnodelen = 4
 _rbcbranchidxmask = 0x7fffffff
 _rbccloseflag = 0x80000000

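To make the record layout concrete, a self-contained sketch of packing and unpacking one rbc-revs record with the format and masks defined above (the node prefix is invented):

    import struct

    node4 = b'\xde\xad\xbe\xef'         # first 4 bytes of a node hash
    branchidx = 5 | 0x80000000          # branch index 5 with the close flag set

    rec = struct.pack('>4sI', node4, branchidx)
    assert len(rec) == 8                # matches _rbcrecsize

    prefix, value = struct.unpack('>4sI', rec)
    closed = bool(value & 0x80000000)   # _rbccloseflag
    index = value & 0x7fffffff          # _rbcbranchidxmask
    assert (prefix, index, closed) == (node4, 5, True)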
 class revbranchcache(object):
     """Persistent cache, mapping from revision number to branch name and close.
     This is a low level cache, independent of filtering.

     Branch names are stored in rbc-names in internal encoding separated by 0.
     rbc-names is append-only, and each branch name is only stored once and will
     thus have a unique index.

     The branch info for each revision is stored in rbc-revs as constant size
     records. The whole file is read into memory, but it is only 'parsed' on
     demand. The file is usually append-only but will be truncated if repo
     modification is detected.
     The record for each revision contains the first 4 bytes of the
     corresponding node hash, and the record is only used if it still matches.
     Even a completely trashed rbc-revs file will thus still give the right
     result while converging towards full recovery ... assuming no incorrectly
     matching node hashes.
     The record also contains 4 bytes where 31 bits contain the index of the
     branch and the last bit indicates that it is a branch close commit.
     The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
     and will grow with it but be 1/8th of its size.
     """

     def __init__(self, repo, readonly=True):
         assert repo.filtername is None
         self._repo = repo
         self._names = [] # branch names in local encoding with static index
         self._rbcrevs = bytearray()
         self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
         try:
             bndata = repo.cachevfs.read(_rbcnames)
             self._rbcsnameslen = len(bndata) # for verification before writing
             if bndata:
                 self._names = [encoding.tolocal(bn)
                                for bn in bndata.split('\0')]
         except (IOError, OSError):
             if readonly:
                 # don't try to use cache - fall back to the slow path
                 self.branchinfo = self._branchinfo

         if self._names:
             try:
                 data = repo.cachevfs.read(_rbcrevs)
                 self._rbcrevs[:] = data
             except (IOError, OSError) as inst:
                 repo.ui.debug("couldn't read revision branch cache: %s\n" %
                               stringutil.forcebytestr(inst))
         # remember number of good records on disk
         self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                                len(repo.changelog))
         if self._rbcrevslen == 0:
             self._names = []
         self._rbcnamescount = len(self._names) # number of names read at
                                                # _rbcsnameslen

     def _clear(self):
         self._rbcsnameslen = 0
         del self._names[:]
         self._rbcnamescount = 0
         self._rbcrevslen = len(self._repo.changelog)
         self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
         util.clearcachedproperty(self, '_namesreverse')

     @util.propertycache
     def _namesreverse(self):
         return dict((b, r) for r, b in enumerate(self._names))

     def branchinfo(self, rev):
         """Return branch name and close flag for rev, using and updating
         persistent cache."""
         changelog = self._repo.changelog
         rbcrevidx = rev * _rbcrecsize

         # avoid negative index, changelog.read(nullrev) is fast without cache
         if rev == nullrev:
             return changelog.branchinfo(rev)

         # if requested rev isn't allocated, grow and cache the rev info
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             return self._branchinfo(rev)

         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
         cachenode, branchidx = unpack_from(
             _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
         close = bool(branchidx & _rbccloseflag)
         if close:
             branchidx &= _rbcbranchidxmask
         if cachenode == '\0\0\0\0':
             pass
         elif cachenode == reponode:
             try:
                 return self._names[branchidx], close
             except IndexError:
                 # recover from invalid reference to unknown branch
                 self._repo.ui.debug("referenced branch names not found"
                     " - rebuilding revision branch cache from scratch\n")
                 self._clear()
         else:
             # rev/node map has changed, invalidate the cache from here up
             self._repo.ui.debug("history modification detected - truncating "
                                 "revision branch cache to revision %d\n" % rev)
             truncate = rbcrevidx + _rbcrecsize
             del self._rbcrevs[truncate:]
             self._rbcrevslen = min(self._rbcrevslen, truncate)

         # fall back to slow path and make sure it will be written to disk
         return self._branchinfo(rev)

     def _branchinfo(self, rev):
         """Retrieve branch info from changelog and update _rbcrevs"""
         changelog = self._repo.changelog
         b, close = changelog.branchinfo(rev)
         if b in self._namesreverse:
             branchidx = self._namesreverse[b]
         else:
             branchidx = len(self._names)
             self._names.append(b)
             self._namesreverse[b] = branchidx
         reponode = changelog.node(rev)
         if close:
             branchidx |= _rbccloseflag
         self._setcachedata(rev, reponode, branchidx)
         return b, close

     def setdata(self, branch, rev, node, close):
         """add new data information to the cache"""
         if branch in self._namesreverse:
             branchidx = self._namesreverse[branch]
         else:
             branchidx = len(self._names)
             self._names.append(branch)
             self._namesreverse[branch] = branchidx
         if close:
             branchidx |= _rbccloseflag
         self._setcachedata(rev, node, branchidx)
         # If no cache data were readable (file does not exist, bad permissions,
         # etc), the cache was bypassing itself by setting:
         #
         #   self.branchinfo = self._branchinfo
         #
         # Since we now have data in the cache, we need to drop this bypassing.
         if r'branchinfo' in vars(self):
             del self.branchinfo

     def _setcachedata(self, rev, node, branchidx):
         """Writes the node's branch data to the in-memory cache data."""
         if rev == nullrev:
             return
         rbcrevidx = rev * _rbcrecsize
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             self._rbcrevs.extend('\0' *
                                  (len(self._repo.changelog) * _rbcrecsize -
                                   len(self._rbcrevs)))
         pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
         self._rbcrevslen = min(self._rbcrevslen, rev)

         tr = self._repo.currenttransaction()
         if tr:
             tr.addfinalize('write-revbranchcache', self.write)

     def write(self, tr=None):
         """Save branch cache if it is dirty."""
         repo = self._repo
         wlock = None
         step = ''
         try:
             if self._rbcnamescount < len(self._names):
                 step = ' names'
                 wlock = repo.wlock(wait=False)
                 if self._rbcnamescount != 0:
                     f = repo.cachevfs.open(_rbcnames, 'ab')
                     if f.tell() == self._rbcsnameslen:
                         f.write('\0')
                     else:
                         f.close()
                         repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                         self._rbcnamescount = 0
                         self._rbcrevslen = 0
                 if self._rbcnamescount == 0:
                     # before rewriting names, make sure references are removed
                     repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                     f = repo.cachevfs.open(_rbcnames, 'wb')
                 f.write('\0'.join(encoding.fromlocal(b)
                                   for b in self._names[self._rbcnamescount:]))
                 self._rbcsnameslen = f.tell()
                 f.close()
                 self._rbcnamescount = len(self._names)

             start = self._rbcrevslen * _rbcrecsize
             if start != len(self._rbcrevs):
                 step = ''
                 if wlock is None:
                     wlock = repo.wlock(wait=False)
                 revs = min(len(repo.changelog),
                            len(self._rbcrevs) // _rbcrecsize)
                 f = repo.cachevfs.open(_rbcrevs, 'ab')
                 if f.tell() != start:
                     repo.ui.debug("truncating cache/%s to %d\n"
                                   % (_rbcrevs, start))
                     f.seek(start)
                     if f.tell() != start:
                         start = 0
                         f.seek(start)
                     f.truncate()
                 end = revs * _rbcrecsize
                 f.write(self._rbcrevs[start:end])
                 f.close()
                 self._rbcrevslen = revs
         except (IOError, OSError, error.Abort, error.LockError) as inst:
             repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                           % (step, stringutil.forcebytestr(inst)))
         finally:
             if wlock is not None:
                 wlock.release()
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,3092 +1,3092 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )

 from .revlogutils import (
     constants as revlogconst,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()

 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)

 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)

 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))

     def join(self, obj, fname):
         return obj.sjoin(fname)

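These filecache classes are used as property decorators further down in localrepo.py; a representative sketch (the property shown is illustrative):

    class examplerepo(object):
        @repofilecache('bookmarks')
        def _bookmarks(self):
            # recomputed only when .hg/bookmarks changes on disk
            return bookmarks.bmstore(self)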
 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True

 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

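A method decorated this way always operates on the unfiltered repository, whichever filtered view it was called on; a sketch with an illustrative method name:

    class examplerepo(object):
        @unfilteredmethod
        def destroyed(self):
            # 'self' here is always repo.unfiltered()
            pass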
 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})

 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'sendcommands()')

         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'close()')

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = pycompat.futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True

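Callers reach this executor through peer.commandexecutor(); a minimal usage sketch, assuming a peer object obtained elsewhere:

    with peer.commandexecutor() as e:
        heads = e.callcommand('heads', {}).result()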
208 @interfaceutil.implementer(repository.ipeercommands)
208 @interfaceutil.implementer(repository.ipeercommands)
209 class localpeer(repository.peer):
209 class localpeer(repository.peer):
210 '''peer for a local repo; reflects only the most recent API'''
210 '''peer for a local repo; reflects only the most recent API'''
211
211
212 def __init__(self, repo, caps=None):
212 def __init__(self, repo, caps=None):
213 super(localpeer, self).__init__()
213 super(localpeer, self).__init__()
214
214
215 if caps is None:
215 if caps is None:
216 caps = moderncaps.copy()
216 caps = moderncaps.copy()
217 self._repo = repo.filtered('served')
217 self._repo = repo.filtered('served')
218 self.ui = repo.ui
218 self.ui = repo.ui
219 self._caps = repo._restrictcapabilities(caps)
219 self._caps = repo._restrictcapabilities(caps)
220
220
221 # Begin of _basepeer interface.
221 # Begin of _basepeer interface.
222
222
223 def url(self):
223 def url(self):
224 return self._repo.url()
224 return self._repo.url()
225
225
226 def local(self):
226 def local(self):
227 return self._repo
227 return self._repo
228
228
229 def peer(self):
229 def peer(self):
230 return self
230 return self
231
231
232 def canpush(self):
232 def canpush(self):
233 return True
233 return True
234
234
235 def close(self):
235 def close(self):
236 self._repo.close()
236 self._repo.close()
237
237
238 # End of _basepeer interface.
238 # End of _basepeer interface.
239
239
240 # Begin of _basewirecommands interface.
240 # Begin of _basewirecommands interface.
241
241
242 def branchmap(self):
242 def branchmap(self):
243 return self._repo.branchmap()
243 return self._repo.branchmap()
244
244
245 def capabilities(self):
245 def capabilities(self):
246 return self._caps
246 return self._caps
247
247
248 def clonebundles(self):
248 def clonebundles(self):
249 return self._repo.tryread('clonebundles.manifest')
249 return self._repo.tryread('clonebundles.manifest')
250
250
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 """Used to test argument passing over the wire"""
252 """Used to test argument passing over the wire"""
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 pycompat.bytestr(four),
254 pycompat.bytestr(four),
255 pycompat.bytestr(five))
255 pycompat.bytestr(five))
256
256
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 **kwargs):
258 **kwargs):
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

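# A minimal sketch of how an extension could use this hook; the extension
# module layout and the requirement string b'exp-myext-requirement' are
# hypothetical:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         # advertise that we know how to open repos with our requirement
#         supported.add(b'exp-myext-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
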
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

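# A hedged sketch of the monkeypatching described above, using
# ``extensions.wrapfunction``; the extra config file name b'hgrc-extra' is
# made up for illustration:
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             # pull in an additional per-repo config file
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
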
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

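# A rough sketch of the factory-function wrapping that
# ``makelocalrepository()`` describes; ``mystorage`` and the requirement
# string b'exp-myext-requirement' are hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         typ = orig(requirements, features, **kwargs)
#         if b'exp-myext-requirement' in requirements:
#             # substitute our own file storage type
#             return mystorage
#         return typ
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage',
#                                 _makefilestorage)
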
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

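    # A hedged sketch of extending the list above from an extension; the
    # state file name 'myext.state' is hypothetical:
    #
    #     from mercurial import localrepo
    #
    #     def uisetup(ui):
    #         localrepo.localrepository._wlockfreeprefix.add('myext.state')
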
    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

1107 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1107 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1108 # self -> auditor -> self._checknested -> self
1108 # self -> auditor -> self._checknested -> self
1109
1109
1110 @property
1110 @property
1111 def auditor(self):
1111 def auditor(self):
1112 # This is only used by context.workingctx.match in order to
1112 # This is only used by context.workingctx.match in order to
1113 # detect files in subrepos.
1113 # detect files in subrepos.
1114 return pathutil.pathauditor(self.root, callback=self._checknested)
1114 return pathutil.pathauditor(self.root, callback=self._checknested)
1115
1115
1116 @property
1116 @property
1117 def nofsauditor(self):
1117 def nofsauditor(self):
1118 # This is only used by context.basectx.match in order to detect
1118 # This is only used by context.basectx.match in order to detect
1119 # files in subrepos.
1119 # files in subrepos.
1120 return pathutil.pathauditor(self.root, callback=self._checknested,
1120 return pathutil.pathauditor(self.root, callback=self._checknested,
1121 realfs=False, cached=True)
1121 realfs=False, cached=True)
1122
1122
1123 def _checknested(self, path):
1123 def _checknested(self, path):
1124 """Determine if path is a legal nested repository."""
1124 """Determine if path is a legal nested repository."""
1125 if not path.startswith(self.root):
1125 if not path.startswith(self.root):
1126 return False
1126 return False
1127 subpath = path[len(self.root) + 1:]
1127 subpath = path[len(self.root) + 1:]
1128 normsubpath = util.pconvert(subpath)
1128 normsubpath = util.pconvert(subpath)
1129
1129
1130 # XXX: Checking against the current working copy is wrong in
1130 # XXX: Checking against the current working copy is wrong in
1131 # the sense that it can reject things like
1131 # the sense that it can reject things like
1132 #
1132 #
1133 # $ hg cat -r 10 sub/x.txt
1133 # $ hg cat -r 10 sub/x.txt
1134 #
1134 #
1135 # if sub/ is no longer a subrepository in the working copy
1135 # if sub/ is no longer a subrepository in the working copy
1136 # parent revision.
1136 # parent revision.
1137 #
1137 #
1138 # However, it can of course also allow things that would have
1138 # However, it can of course also allow things that would have
1139 # been rejected before, such as the above cat command if sub/
1139 # been rejected before, such as the above cat command if sub/
1140 # is a subrepository now, but was a normal directory before.
1140 # is a subrepository now, but was a normal directory before.
1141 # The old path auditor would have rejected by mistake since it
1141 # The old path auditor would have rejected by mistake since it
1142 # panics when it sees sub/.hg/.
1142 # panics when it sees sub/.hg/.
1143 #
1143 #
1144 # All in all, checking against the working copy seems sensible
1144 # All in all, checking against the working copy seems sensible
1145 # since we want to prevent access to nested repositories on
1145 # since we want to prevent access to nested repositories on
1146 # the filesystem *now*.
1146 # the filesystem *now*.
1147 ctx = self[None]
1147 ctx = self[None]
1148 parts = util.splitpath(subpath)
1148 parts = util.splitpath(subpath)
1149 while parts:
1149 while parts:
1150 prefix = '/'.join(parts)
1150 prefix = '/'.join(parts)
1151 if prefix in ctx.substate:
1151 if prefix in ctx.substate:
1152 if prefix == normsubpath:
1152 if prefix == normsubpath:
1153 return True
1153 return True
1154 else:
1154 else:
1155 sub = ctx.sub(prefix)
1155 sub = ctx.sub(prefix)
1156 return sub.checknested(subpath[len(prefix) + 1:])
1156 return sub.checknested(subpath[len(prefix) + 1:])
1157 else:
1157 else:
1158 parts.pop()
1158 parts.pop()
1159 return False
1159 return False
1160
1160
1161 def peer(self):
1161 def peer(self):
1162 return localpeer(self) # not cached to avoid reference cycle
1162 return localpeer(self) # not cached to avoid reference cycle
1163
1163
1164 def unfiltered(self):
1164 def unfiltered(self):
1165 """Return unfiltered version of the repository
1165 """Return unfiltered version of the repository
1166
1166
1167 Intended to be overwritten by filtered repo."""
1167 Intended to be overwritten by filtered repo."""
1168 return self
1168 return self
1169
1169
1170 def filtered(self, name, visibilityexceptions=None):
1170 def filtered(self, name, visibilityexceptions=None):
1171 """Return a filtered version of a repository"""
1171 """Return a filtered version of a repository"""
1172 cls = repoview.newtype(self.unfiltered().__class__)
1172 cls = repoview.newtype(self.unfiltered().__class__)
1173 return cls(self, name, visibilityexceptions)
1173 return cls(self, name, visibilityexceptions)
1174
1174
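# A minimal usage sketch (hypothetical; assumes `repo` is a local
# repository object). Filters are the named repoview subsets listed in
# branchmap.subsettable:
#
#   served = repo.filtered('served')   # view without hidden/secret csets
#   unfi = served.unfiltered()         # back to the unfiltered repository
#   assert unfi.filtered('visible').filtername == 'visible'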
1175 @repofilecache('bookmarks', 'bookmarks.current')
1175 @repofilecache('bookmarks', 'bookmarks.current')
1176 def _bookmarks(self):
1176 def _bookmarks(self):
1177 return bookmarks.bmstore(self)
1177 return bookmarks.bmstore(self)
1178
1178
1179 @property
1179 @property
1180 def _activebookmark(self):
1180 def _activebookmark(self):
1181 return self._bookmarks.active
1181 return self._bookmarks.active
1182
1182
1183 # _phasesets depend on changelog. what we need is to call
1183 # _phasesets depend on changelog. what we need is to call
1184 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1184 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1185 # can't be easily expressed in filecache mechanism.
1185 # can't be easily expressed in filecache mechanism.
1186 @storecache('phaseroots', '00changelog.i')
1186 @storecache('phaseroots', '00changelog.i')
1187 def _phasecache(self):
1187 def _phasecache(self):
1188 return phases.phasecache(self, self._phasedefaults)
1188 return phases.phasecache(self, self._phasedefaults)
1189
1189
1190 @storecache('obsstore')
1190 @storecache('obsstore')
1191 def obsstore(self):
1191 def obsstore(self):
1192 return obsolete.makestore(self.ui, self)
1192 return obsolete.makestore(self.ui, self)
1193
1193
1194 @storecache('00changelog.i')
1194 @storecache('00changelog.i')
1195 def changelog(self):
1195 def changelog(self):
1196 return changelog.changelog(self.svfs,
1196 return changelog.changelog(self.svfs,
1197 trypending=txnutil.mayhavepending(self.root))
1197 trypending=txnutil.mayhavepending(self.root))
1198
1198
1199 @storecache('00manifest.i')
1199 @storecache('00manifest.i')
1200 def manifestlog(self):
1200 def manifestlog(self):
1201 rootstore = manifest.manifestrevlog(self.svfs)
1201 rootstore = manifest.manifestrevlog(self.svfs)
1202 return manifest.manifestlog(self.svfs, self, rootstore,
1202 return manifest.manifestlog(self.svfs, self, rootstore,
1203 self._storenarrowmatch)
1203 self._storenarrowmatch)
1204
1204
1205 @repofilecache('dirstate')
1205 @repofilecache('dirstate')
1206 def dirstate(self):
1206 def dirstate(self):
1207 return self._makedirstate()
1207 return self._makedirstate()
1208
1208
1209 def _makedirstate(self):
1209 def _makedirstate(self):
1210 """Extension point for wrapping the dirstate per-repo."""
1210 """Extension point for wrapping the dirstate per-repo."""
1211 sparsematchfn = lambda: sparse.matcher(self)
1211 sparsematchfn = lambda: sparse.matcher(self)
1212
1212
1213 return dirstate.dirstate(self.vfs, self.ui, self.root,
1213 return dirstate.dirstate(self.vfs, self.ui, self.root,
1214 self._dirstatevalidate, sparsematchfn)
1214 self._dirstatevalidate, sparsematchfn)
1215
1215
1216 def _dirstatevalidate(self, node):
1216 def _dirstatevalidate(self, node):
1217 try:
1217 try:
1218 self.changelog.rev(node)
1218 self.changelog.rev(node)
1219 return node
1219 return node
1220 except error.LookupError:
1220 except error.LookupError:
1221 if not self._dirstatevalidatewarned:
1221 if not self._dirstatevalidatewarned:
1222 self._dirstatevalidatewarned = True
1222 self._dirstatevalidatewarned = True
1223 self.ui.warn(_("warning: ignoring unknown"
1223 self.ui.warn(_("warning: ignoring unknown"
1224 " working parent %s!\n") % short(node))
1224 " working parent %s!\n") % short(node))
1225 return nullid
1225 return nullid
1226
1226
1227 @storecache(narrowspec.FILENAME)
1227 @storecache(narrowspec.FILENAME)
1228 def narrowpats(self):
1228 def narrowpats(self):
1229 """matcher patterns for this repository's narrowspec
1229 """matcher patterns for this repository's narrowspec
1230
1230
1231 A tuple of (includes, excludes).
1231 A tuple of (includes, excludes).
1232 """
1232 """
1233 return narrowspec.load(self)
1233 return narrowspec.load(self)
1234
1234
1235 @storecache(narrowspec.FILENAME)
1235 @storecache(narrowspec.FILENAME)
1236 def _storenarrowmatch(self):
1236 def _storenarrowmatch(self):
1237 if repository.NARROW_REQUIREMENT not in self.requirements:
1237 if repository.NARROW_REQUIREMENT not in self.requirements:
1238 return matchmod.always()
1238 return matchmod.always()
1239 include, exclude = self.narrowpats
1239 include, exclude = self.narrowpats
1240 return narrowspec.match(self.root, include=include, exclude=exclude)
1240 return narrowspec.match(self.root, include=include, exclude=exclude)
1241
1241
1242 @storecache(narrowspec.FILENAME)
1242 @storecache(narrowspec.FILENAME)
1243 def _narrowmatch(self):
1243 def _narrowmatch(self):
1244 if repository.NARROW_REQUIREMENT not in self.requirements:
1244 if repository.NARROW_REQUIREMENT not in self.requirements:
1245 return matchmod.always()
1245 return matchmod.always()
1246 narrowspec.checkworkingcopynarrowspec(self)
1246 narrowspec.checkworkingcopynarrowspec(self)
1247 include, exclude = self.narrowpats
1247 include, exclude = self.narrowpats
1248 return narrowspec.match(self.root, include=include, exclude=exclude)
1248 return narrowspec.match(self.root, include=include, exclude=exclude)
1249
1249
1250 def narrowmatch(self, match=None, includeexact=False):
1250 def narrowmatch(self, match=None, includeexact=False):
1251 """matcher corresponding the the repo's narrowspec
1251 """matcher corresponding the the repo's narrowspec
1252
1252
1253 If `match` is given, then that will be intersected with the narrow
1253 If `match` is given, then that will be intersected with the narrow
1254 matcher.
1254 matcher.
1255
1255
1256 If `includeexact` is True, then any exact matches from `match` will
1256 If `includeexact` is True, then any exact matches from `match` will
1257 be included even if they're outside the narrowspec.
1257 be included even if they're outside the narrowspec.
1258 """
1258 """
1259 if match:
1259 if match:
1260 if includeexact and not self._narrowmatch.always():
1260 if includeexact and not self._narrowmatch.always():
1261 # do not exclude explicitly-specified paths so that they can
1261 # do not exclude explicitly-specified paths so that they can
1262 # be warned later on
1262 # be warned later on
1263 em = matchmod.exact(match.files())
1263 em = matchmod.exact(match.files())
1264 nm = matchmod.unionmatcher([self._narrowmatch, em])
1264 nm = matchmod.unionmatcher([self._narrowmatch, em])
1265 return matchmod.intersectmatchers(match, nm)
1265 return matchmod.intersectmatchers(match, nm)
1266 return matchmod.intersectmatchers(match, self._narrowmatch)
1266 return matchmod.intersectmatchers(match, self._narrowmatch)
1267 return self._narrowmatch
1267 return self._narrowmatch
1268
1268
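# A hedged sketch of the combinations handled above (`m` stands for any
# matcher built by the caller; usage is hypothetical):
#
#   nm = repo.narrowmatch()            # the narrowspec matcher alone
#   both = repo.narrowmatch(m)         # intersection of m and the narrowspec
#   loose = repo.narrowmatch(m, includeexact=True)  # keep m's exact files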
1269 def setnarrowpats(self, newincludes, newexcludes):
1269 def setnarrowpats(self, newincludes, newexcludes):
1270 narrowspec.save(self, newincludes, newexcludes)
1270 narrowspec.save(self, newincludes, newexcludes)
1271 self.invalidate(clearfilecache=True)
1271 self.invalidate(clearfilecache=True)
1272
1272
1273 def __getitem__(self, changeid):
1273 def __getitem__(self, changeid):
1274 if changeid is None:
1274 if changeid is None:
1275 return context.workingctx(self)
1275 return context.workingctx(self)
1276 if isinstance(changeid, context.basectx):
1276 if isinstance(changeid, context.basectx):
1277 return changeid
1277 return changeid
1278 if isinstance(changeid, slice):
1278 if isinstance(changeid, slice):
1279 # wdirrev isn't contiguous so the slice shouldn't include it
1279 # wdirrev isn't contiguous so the slice shouldn't include it
1280 return [self[i]
1280 return [self[i]
1281 for i in pycompat.xrange(*changeid.indices(len(self)))
1281 for i in pycompat.xrange(*changeid.indices(len(self)))
1282 if i not in self.changelog.filteredrevs]
1282 if i not in self.changelog.filteredrevs]
1283 try:
1283 try:
1284 if isinstance(changeid, int):
1284 if isinstance(changeid, int):
1285 node = self.changelog.node(changeid)
1285 node = self.changelog.node(changeid)
1286 rev = changeid
1286 rev = changeid
1287 elif changeid == 'null':
1287 elif changeid == 'null':
1288 node = nullid
1288 node = nullid
1289 rev = nullrev
1289 rev = nullrev
1290 elif changeid == 'tip':
1290 elif changeid == 'tip':
1291 node = self.changelog.tip()
1291 node = self.changelog.tip()
1292 rev = self.changelog.rev(node)
1292 rev = self.changelog.rev(node)
1293 elif changeid == '.':
1293 elif changeid == '.':
1294 # this is a hack to delay/avoid loading obsmarkers
1294 # this is a hack to delay/avoid loading obsmarkers
1295 # when we know that '.' won't be hidden
1295 # when we know that '.' won't be hidden
1296 node = self.dirstate.p1()
1296 node = self.dirstate.p1()
1297 rev = self.unfiltered().changelog.rev(node)
1297 rev = self.unfiltered().changelog.rev(node)
1298 elif len(changeid) == 20:
1298 elif len(changeid) == 20:
1299 try:
1299 try:
1300 node = changeid
1300 node = changeid
1301 rev = self.changelog.rev(changeid)
1301 rev = self.changelog.rev(changeid)
1302 except error.FilteredLookupError:
1302 except error.FilteredLookupError:
1303 changeid = hex(changeid) # for the error message
1303 changeid = hex(changeid) # for the error message
1304 raise
1304 raise
1305 except LookupError:
1305 except LookupError:
1306 # check if it might have come from damaged dirstate
1306 # check if it might have come from damaged dirstate
1307 #
1307 #
1308 # XXX we could avoid the unfiltered if we had a recognizable
1308 # XXX we could avoid the unfiltered if we had a recognizable
1309 # exception for filtered changeset access
1309 # exception for filtered changeset access
1310 if (self.local()
1310 if (self.local()
1311 and changeid in self.unfiltered().dirstate.parents()):
1311 and changeid in self.unfiltered().dirstate.parents()):
1312 msg = _("working directory has unknown parent '%s'!")
1312 msg = _("working directory has unknown parent '%s'!")
1313 raise error.Abort(msg % short(changeid))
1313 raise error.Abort(msg % short(changeid))
1314 changeid = hex(changeid) # for the error message
1314 changeid = hex(changeid) # for the error message
1315 raise
1315 raise
1316
1316
1317 elif len(changeid) == 40:
1317 elif len(changeid) == 40:
1318 node = bin(changeid)
1318 node = bin(changeid)
1319 rev = self.changelog.rev(node)
1319 rev = self.changelog.rev(node)
1320 else:
1320 else:
1321 raise error.ProgrammingError(
1321 raise error.ProgrammingError(
1322 "unsupported changeid '%s' of type %s" %
1322 "unsupported changeid '%s' of type %s" %
1323 (changeid, type(changeid)))
1323 (changeid, type(changeid)))
1324
1324
1325 return context.changectx(self, rev, node)
1325 return context.changectx(self, rev, node)
1326
1326
1327 except (error.FilteredIndexError, error.FilteredLookupError):
1327 except (error.FilteredIndexError, error.FilteredLookupError):
1328 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1328 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1329 % pycompat.bytestr(changeid))
1329 % pycompat.bytestr(changeid))
1330 except (IndexError, LookupError):
1330 except (IndexError, LookupError):
1331 raise error.RepoLookupError(
1331 raise error.RepoLookupError(
1332 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1332 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1333 except error.WdirUnsupported:
1333 except error.WdirUnsupported:
1334 return context.workingctx(self)
1334 return context.workingctx(self)
1335
1335
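# Examples of the changeid forms dispatched above (values hypothetical):
#
#   repo[None]     # workingctx for the working directory
#   repo['tip']    # changectx for the tipmost revision
#   repo[0]        # changectx for revision number 0
#   repo[0:3]      # list of changectx, filtered revisions skipped
#   repo[node]     # 20-byte binary node or 40-character hex string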
1336 def __contains__(self, changeid):
1336 def __contains__(self, changeid):
1337 """True if the given changeid exists
1337 """True if the given changeid exists
1338
1338
1339 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1339 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1340 is specified.
1340 is specified.
1341 """
1341 """
1342 try:
1342 try:
1343 self[changeid]
1343 self[changeid]
1344 return True
1344 return True
1345 except error.RepoLookupError:
1345 except error.RepoLookupError:
1346 return False
1346 return False
1347
1347
1348 def __nonzero__(self):
1348 def __nonzero__(self):
1349 return True
1349 return True
1350
1350
1351 __bool__ = __nonzero__
1351 __bool__ = __nonzero__
1352
1352
1353 def __len__(self):
1353 def __len__(self):
1354 # no need to pay the cost of repoview.changelog
1354 # no need to pay the cost of repoview.changelog
1355 unfi = self.unfiltered()
1355 unfi = self.unfiltered()
1356 return len(unfi.changelog)
1356 return len(unfi.changelog)
1357
1357
1358 def __iter__(self):
1358 def __iter__(self):
1359 return iter(self.changelog)
1359 return iter(self.changelog)
1360
1360
1361 def revs(self, expr, *args):
1361 def revs(self, expr, *args):
1362 '''Find revisions matching a revset.
1362 '''Find revisions matching a revset.
1363
1363
1364 The revset is specified as a string ``expr`` that may contain
1364 The revset is specified as a string ``expr`` that may contain
1365 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1365 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1366
1366
1367 Revset aliases from the configuration are not expanded. To expand
1367 Revset aliases from the configuration are not expanded. To expand
1368 user aliases, consider calling ``scmutil.revrange()`` or
1368 user aliases, consider calling ``scmutil.revrange()`` or
1369 ``repo.anyrevs([expr], user=True)``.
1369 ``repo.anyrevs([expr], user=True)``.
1370
1370
1371 Returns a revset.abstractsmartset, which is a list-like interface
1371 Returns a revset.abstractsmartset, which is a list-like interface
1372 that contains integer revisions.
1372 that contains integer revisions.
1373 '''
1373 '''
1374 tree = revsetlang.spectree(expr, *args)
1374 tree = revsetlang.spectree(expr, *args)
1375 return revset.makematcher(tree)(self)
1375 return revset.makematcher(tree)(self)
1376
1376
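# Example queries using %-formatting (see revsetlang.formatspec; the
# argument values here are hypothetical):
#
#   repo.revs('heads(%d::)', rev)        # %d formats a revision number
#   repo.revs('branch(%s)', 'default')   # %s formats a quoted string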
1377 def set(self, expr, *args):
1377 def set(self, expr, *args):
1378 '''Find revisions matching a revset and emit changectx instances.
1378 '''Find revisions matching a revset and emit changectx instances.
1379
1379
1380 This is a convenience wrapper around ``revs()`` that iterates the
1380 This is a convenience wrapper around ``revs()`` that iterates the
1381 result and is a generator of changectx instances.
1381 result and is a generator of changectx instances.
1382
1382
1383 Revset aliases from the configuration are not expanded. To expand
1383 Revset aliases from the configuration are not expanded. To expand
1384 user aliases, consider calling ``scmutil.revrange()``.
1384 user aliases, consider calling ``scmutil.revrange()``.
1385 '''
1385 '''
1386 for r in self.revs(expr, *args):
1386 for r in self.revs(expr, *args):
1387 yield self[r]
1387 yield self[r]
1388
1388
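# Example iteration (hypothetical revset):
#
#   for ctx in repo.set('draft() and user(%s)', 'alice'):
#       repo.ui.write(ctx.description())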
1389 def anyrevs(self, specs, user=False, localalias=None):
1389 def anyrevs(self, specs, user=False, localalias=None):
1390 '''Find revisions matching one of the given revsets.
1390 '''Find revisions matching one of the given revsets.
1391
1391
1392 Revset aliases from the configuration are not expanded by default. To
1392 Revset aliases from the configuration are not expanded by default. To
1393 expand user aliases, specify ``user=True``. To provide some local
1393 expand user aliases, specify ``user=True``. To provide some local
1394 definitions overriding user aliases, set ``localalias`` to
1394 definitions overriding user aliases, set ``localalias`` to
1395 ``{name: definitionstring}``.
1395 ``{name: definitionstring}``.
1396 '''
1396 '''
1397 if user:
1397 if user:
1398 m = revset.matchany(self.ui, specs,
1398 m = revset.matchany(self.ui, specs,
1399 lookup=revset.lookupfn(self),
1399 lookup=revset.lookupfn(self),
1400 localalias=localalias)
1400 localalias=localalias)
1401 else:
1401 else:
1402 m = revset.matchany(None, specs, localalias=localalias)
1402 m = revset.matchany(None, specs, localalias=localalias)
1403 return m(self)
1403 return m(self)
1404
1404
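# Example with a local alias shadowing user aliases (hypothetical alias
# name and definition):
#
#   revs = repo.anyrevs(['mine'], user=True,
#                       localalias={'mine': 'user("alice")'})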
1405 def url(self):
1405 def url(self):
1406 return 'file:' + self.root
1406 return 'file:' + self.root
1407
1407
1408 def hook(self, name, throw=False, **args):
1408 def hook(self, name, throw=False, **args):
1409 """Call a hook, passing this repo instance.
1409 """Call a hook, passing this repo instance.
1410
1410
1411 This is a convenience method to aid invoking hooks. Extensions likely
1411 This is a convenience method to aid invoking hooks. Extensions likely
1412 won't call this unless they have registered a custom hook or are
1412 won't call this unless they have registered a custom hook or are
1413 replacing code that is expected to call a hook.
1413 replacing code that is expected to call a hook.
1414 """
1414 """
1415 return hook.hook(self.ui, self, name, throw, **args)
1415 return hook.hook(self.ui, self, name, throw, **args)
1416
1416
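# Example invocation, mirroring the transaction code later in this file
# (argument values hypothetical; extra keywords are passed to the hook):
#
#   repo.hook('pretxnopen', throw=True, txnname='push')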
1417 @filteredpropertycache
1417 @filteredpropertycache
1418 def _tagscache(self):
1418 def _tagscache(self):
1419 '''Returns a tagscache object that contains various tags related
1419 '''Returns a tagscache object that contains various tags related
1420 caches.'''
1420 caches.'''
1421
1421
1422 # This simplifies its cache management by having one decorated
1422 # This simplifies its cache management by having one decorated
1423 # function (this one) and the rest simply fetch things from it.
1423 # function (this one) and the rest simply fetch things from it.
1424 class tagscache(object):
1424 class tagscache(object):
1425 def __init__(self):
1425 def __init__(self):
1426 # These two define the set of tags for this repository. tags
1426 # These two define the set of tags for this repository. tags
1427 # maps tag name to node; tagtypes maps tag name to 'global' or
1427 # maps tag name to node; tagtypes maps tag name to 'global' or
1428 # 'local'. (Global tags are defined by .hgtags across all
1428 # 'local'. (Global tags are defined by .hgtags across all
1429 # heads, and local tags are defined in .hg/localtags.)
1429 # heads, and local tags are defined in .hg/localtags.)
1430 # They constitute the in-memory cache of tags.
1430 # They constitute the in-memory cache of tags.
1431 self.tags = self.tagtypes = None
1431 self.tags = self.tagtypes = None
1432
1432
1433 self.nodetagscache = self.tagslist = None
1433 self.nodetagscache = self.tagslist = None
1434
1434
1435 cache = tagscache()
1435 cache = tagscache()
1436 cache.tags, cache.tagtypes = self._findtags()
1436 cache.tags, cache.tagtypes = self._findtags()
1437
1437
1438 return cache
1438 return cache
1439
1439
1440 def tags(self):
1440 def tags(self):
1441 '''return a mapping of tag to node'''
1441 '''return a mapping of tag to node'''
1442 t = {}
1442 t = {}
1443 if self.changelog.filteredrevs:
1443 if self.changelog.filteredrevs:
1444 tags, tt = self._findtags()
1444 tags, tt = self._findtags()
1445 else:
1445 else:
1446 tags = self._tagscache.tags
1446 tags = self._tagscache.tags
1447 rev = self.changelog.rev
1447 rev = self.changelog.rev
1448 for k, v in tags.iteritems():
1448 for k, v in tags.iteritems():
1449 try:
1449 try:
1450 # ignore tags to unknown nodes
1450 # ignore tags to unknown nodes
1451 rev(v)
1451 rev(v)
1452 t[k] = v
1452 t[k] = v
1453 except (error.LookupError, ValueError):
1453 except (error.LookupError, ValueError):
1454 pass
1454 pass
1455 return t
1455 return t
1456
1456
1457 def _findtags(self):
1457 def _findtags(self):
1458 '''Do the hard work of finding tags. Return a pair of dicts
1458 '''Do the hard work of finding tags. Return a pair of dicts
1459 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1459 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1460 maps tag name to a string like \'global\' or \'local\'.
1460 maps tag name to a string like \'global\' or \'local\'.
1461 Subclasses or extensions are free to add their own tags, but
1461 Subclasses or extensions are free to add their own tags, but
1462 should be aware that the returned dicts will be retained for the
1462 should be aware that the returned dicts will be retained for the
1463 duration of the localrepo object.'''
1463 duration of the localrepo object.'''
1464
1464
1465 # XXX what tagtype should subclasses/extensions use? Currently
1465 # XXX what tagtype should subclasses/extensions use? Currently
1466 # mq and bookmarks add tags, but do not set the tagtype at all.
1466 # mq and bookmarks add tags, but do not set the tagtype at all.
1467 # Should each extension invent its own tag type? Should there
1467 # Should each extension invent its own tag type? Should there
1468 # be one tagtype for all such "virtual" tags? Or is the status
1468 # be one tagtype for all such "virtual" tags? Or is the status
1469 # quo fine?
1469 # quo fine?
1470
1470
1471
1471
1472 # map tag name to (node, hist)
1472 # map tag name to (node, hist)
1473 alltags = tagsmod.findglobaltags(self.ui, self)
1473 alltags = tagsmod.findglobaltags(self.ui, self)
1474 # map tag name to tag type
1474 # map tag name to tag type
1475 tagtypes = dict((tag, 'global') for tag in alltags)
1475 tagtypes = dict((tag, 'global') for tag in alltags)
1476
1476
1477 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1477 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1478
1478
1479 # Build the return dicts. Have to re-encode tag names because
1479 # Build the return dicts. Have to re-encode tag names because
1480 # the tags module always uses UTF-8 (in order not to lose info
1480 # the tags module always uses UTF-8 (in order not to lose info
1481 # writing to the cache), but the rest of Mercurial wants them in
1481 # writing to the cache), but the rest of Mercurial wants them in
1482 # local encoding.
1482 # local encoding.
1483 tags = {}
1483 tags = {}
1484 for (name, (node, hist)) in alltags.iteritems():
1484 for (name, (node, hist)) in alltags.iteritems():
1485 if node != nullid:
1485 if node != nullid:
1486 tags[encoding.tolocal(name)] = node
1486 tags[encoding.tolocal(name)] = node
1487 tags['tip'] = self.changelog.tip()
1487 tags['tip'] = self.changelog.tip()
1488 tagtypes = dict([(encoding.tolocal(name), value)
1488 tagtypes = dict([(encoding.tolocal(name), value)
1489 for (name, value) in tagtypes.iteritems()])
1489 for (name, value) in tagtypes.iteritems()])
1490 return (tags, tagtypes)
1490 return (tags, tagtypes)
1491
1491
1492 def tagtype(self, tagname):
1492 def tagtype(self, tagname):
1493 '''
1493 '''
1494 return the type of the given tag. result can be:
1494 return the type of the given tag. result can be:
1495
1495
1496 'local' : a local tag
1496 'local' : a local tag
1497 'global' : a global tag
1497 'global' : a global tag
1498 None : tag does not exist
1498 None : tag does not exist
1499 '''
1499 '''
1500
1500
1501 return self._tagscache.tagtypes.get(tagname)
1501 return self._tagscache.tagtypes.get(tagname)
1502
1502
1503 def tagslist(self):
1503 def tagslist(self):
1504 '''return a list of tags ordered by revision'''
1504 '''return a list of tags ordered by revision'''
1505 if not self._tagscache.tagslist:
1505 if not self._tagscache.tagslist:
1506 l = []
1506 l = []
1507 for t, n in self.tags().iteritems():
1507 for t, n in self.tags().iteritems():
1508 l.append((self.changelog.rev(n), t, n))
1508 l.append((self.changelog.rev(n), t, n))
1509 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1509 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1510
1510
1511 return self._tagscache.tagslist
1511 return self._tagscache.tagslist
1512
1512
1513 def nodetags(self, node):
1513 def nodetags(self, node):
1514 '''return the tags associated with a node'''
1514 '''return the tags associated with a node'''
1515 if not self._tagscache.nodetagscache:
1515 if not self._tagscache.nodetagscache:
1516 nodetagscache = {}
1516 nodetagscache = {}
1517 for t, n in self._tagscache.tags.iteritems():
1517 for t, n in self._tagscache.tags.iteritems():
1518 nodetagscache.setdefault(n, []).append(t)
1518 nodetagscache.setdefault(n, []).append(t)
1519 for tags in nodetagscache.itervalues():
1519 for tags in nodetagscache.itervalues():
1520 tags.sort()
1520 tags.sort()
1521 self._tagscache.nodetagscache = nodetagscache
1521 self._tagscache.nodetagscache = nodetagscache
1522 return self._tagscache.nodetagscache.get(node, [])
1522 return self._tagscache.nodetagscache.get(node, [])
1523
1523
1524 def nodebookmarks(self, node):
1524 def nodebookmarks(self, node):
1525 """return the list of bookmarks pointing to the specified node"""
1525 """return the list of bookmarks pointing to the specified node"""
1526 return self._bookmarks.names(node)
1526 return self._bookmarks.names(node)
1527
1527
1528 def branchmap(self):
1528 def branchmap(self):
1529 '''returns a branchcache of {branch: [branchheads]} with branch
1529 '''returns a branchcache of {branch: [branchheads]} with branch
1530 heads ordered by increasing revision number'''
1530 heads ordered by increasing revision number'''
1531 return self._branchcaches[self]
1531 return self._branchcaches[self]
1532
1532
1533 @unfilteredmethod
1533 @unfilteredmethod
1534 def revbranchcache(self):
1534 def revbranchcache(self):
1535 if not self._revbranchcache:
1535 if not self._revbranchcache:
1536 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1536 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1537 return self._revbranchcache
1537 return self._revbranchcache
1538
1538
1539 def branchtip(self, branch, ignoremissing=False):
1539 def branchtip(self, branch, ignoremissing=False):
1540 '''return the tip node for a given branch
1540 '''return the tip node for a given branch
1541
1541
1542 If ignoremissing is True, then this method will not raise an error.
1542 If ignoremissing is True, then this method will not raise an error.
1543 This is helpful for callers that only expect None for a missing branch
1543 This is helpful for callers that only expect None for a missing branch
1544 (e.g. namespace).
1544 (e.g. namespace).
1545
1545
1546 '''
1546 '''
1547 try:
1547 try:
1548 return self.branchmap().branchtip(branch)
1548 return self.branchmap().branchtip(branch)
1549 except KeyError:
1549 except KeyError:
1550 if not ignoremissing:
1550 if not ignoremissing:
1551 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1551 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1552 else:
1552 else:
1553 pass
1553 pass
1554
1554
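# Example (hypothetical branch names):
#
#   repo.branchtip('default')                   # raises if branch unknown
#   repo.branchtip('gone', ignoremissing=True)  # returns None instead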
1555 def lookup(self, key):
1555 def lookup(self, key):
1556 return scmutil.revsymbol(self, key).node()
1556 return scmutil.revsymbol(self, key).node()
1557
1557
1558 def lookupbranch(self, key):
1558 def lookupbranch(self, key):
1559 if key in self.branchmap():
1559 if key in self.branchmap().entries:
1560 return key
1560 return key
1561
1561
1562 return scmutil.revsymbol(self, key).branch()
1562 return scmutil.revsymbol(self, key).branch()
1563
1563
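# With the dict interface removed from branchcache (the change above),
# membership is now tested on the 'entries' mapping (branch name
# hypothetical):
#
#   bm = repo.branchmap()
#   if 'default' in bm.entries:   # formerly: if 'default' in bm
#       pass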
1564 def known(self, nodes):
1564 def known(self, nodes):
1565 cl = self.changelog
1565 cl = self.changelog
1566 nm = cl.nodemap
1566 nm = cl.nodemap
1567 filtered = cl.filteredrevs
1567 filtered = cl.filteredrevs
1568 result = []
1568 result = []
1569 for n in nodes:
1569 for n in nodes:
1570 r = nm.get(n)
1570 r = nm.get(n)
1571 resp = not (r is None or r in filtered)
1571 resp = not (r is None or r in filtered)
1572 result.append(resp)
1572 result.append(resp)
1573 return result
1573 return result
1574
1574
1575 def local(self):
1575 def local(self):
1576 return self
1576 return self
1577
1577
1578 def publishing(self):
1578 def publishing(self):
1579 # it's safe (and desirable) to trust the publish flag unconditionally
1579 # it's safe (and desirable) to trust the publish flag unconditionally
1580 # so that we don't finalize changes shared between users via ssh or nfs
1580 # so that we don't finalize changes shared between users via ssh or nfs
1581 return self.ui.configbool('phases', 'publish', untrusted=True)
1581 return self.ui.configbool('phases', 'publish', untrusted=True)
1582
1582
1583 def cancopy(self):
1583 def cancopy(self):
1584 # so statichttprepo's override of local() works
1584 # so statichttprepo's override of local() works
1585 if not self.local():
1585 if not self.local():
1586 return False
1586 return False
1587 if not self.publishing():
1587 if not self.publishing():
1588 return True
1588 return True
1589 # if publishing we can't copy if there is filtered content
1589 # if publishing we can't copy if there is filtered content
1590 return not self.filtered('visible').changelog.filteredrevs
1590 return not self.filtered('visible').changelog.filteredrevs
1591
1591
1592 def shared(self):
1592 def shared(self):
1593 '''the type of shared repository (None if not shared)'''
1593 '''the type of shared repository (None if not shared)'''
1594 if self.sharedpath != self.path:
1594 if self.sharedpath != self.path:
1595 return 'store'
1595 return 'store'
1596 return None
1596 return None
1597
1597
1598 def wjoin(self, f, *insidef):
1598 def wjoin(self, f, *insidef):
1599 return self.vfs.reljoin(self.root, f, *insidef)
1599 return self.vfs.reljoin(self.root, f, *insidef)
1600
1600
1601 def setparents(self, p1, p2=nullid):
1601 def setparents(self, p1, p2=nullid):
1602 with self.dirstate.parentchange():
1602 with self.dirstate.parentchange():
1603 copies = self.dirstate.setparents(p1, p2)
1603 copies = self.dirstate.setparents(p1, p2)
1604 pctx = self[p1]
1604 pctx = self[p1]
1605 if copies:
1605 if copies:
1606 # Adjust copy records; the dirstate cannot do it itself, as it
1606 # Adjust copy records; the dirstate cannot do it itself, as it
1607 # requires access to the parents' manifests. Preserve them
1607 # requires access to the parents' manifests. Preserve them
1608 # only for entries added to the first parent.
1608 # only for entries added to the first parent.
1609 for f in copies:
1609 for f in copies:
1610 if f not in pctx and copies[f] in pctx:
1610 if f not in pctx and copies[f] in pctx:
1611 self.dirstate.copy(copies[f], f)
1611 self.dirstate.copy(copies[f], f)
1612 if p2 == nullid:
1612 if p2 == nullid:
1613 for f, s in sorted(self.dirstate.copies().items()):
1613 for f, s in sorted(self.dirstate.copies().items()):
1614 if f not in pctx and s not in pctx:
1614 if f not in pctx and s not in pctx:
1615 self.dirstate.copy(None, f)
1615 self.dirstate.copy(None, f)
1616
1616
1617 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1617 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1618 """changeid must be a changeset revision, if specified.
1618 """changeid must be a changeset revision, if specified.
1619 fileid can be a file revision or node."""
1619 fileid can be a file revision or node."""
1620 return context.filectx(self, path, changeid, fileid,
1620 return context.filectx(self, path, changeid, fileid,
1621 changectx=changectx)
1621 changectx=changectx)
1622
1622
1623 def getcwd(self):
1623 def getcwd(self):
1624 return self.dirstate.getcwd()
1624 return self.dirstate.getcwd()
1625
1625
1626 def pathto(self, f, cwd=None):
1626 def pathto(self, f, cwd=None):
1627 return self.dirstate.pathto(f, cwd)
1627 return self.dirstate.pathto(f, cwd)
1628
1628
1629 def _loadfilter(self, filter):
1629 def _loadfilter(self, filter):
1630 if filter not in self._filterpats:
1630 if filter not in self._filterpats:
1631 l = []
1631 l = []
1632 for pat, cmd in self.ui.configitems(filter):
1632 for pat, cmd in self.ui.configitems(filter):
1633 if cmd == '!':
1633 if cmd == '!':
1634 continue
1634 continue
1635 mf = matchmod.match(self.root, '', [pat])
1635 mf = matchmod.match(self.root, '', [pat])
1636 fn = None
1636 fn = None
1637 params = cmd
1637 params = cmd
1638 for name, filterfn in self._datafilters.iteritems():
1638 for name, filterfn in self._datafilters.iteritems():
1639 if cmd.startswith(name):
1639 if cmd.startswith(name):
1640 fn = filterfn
1640 fn = filterfn
1641 params = cmd[len(name):].lstrip()
1641 params = cmd[len(name):].lstrip()
1642 break
1642 break
1643 if not fn:
1643 if not fn:
1644 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1644 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1645 # Wrap old filters not supporting keyword arguments
1645 # Wrap old filters not supporting keyword arguments
1646 if not pycompat.getargspec(fn)[2]:
1646 if not pycompat.getargspec(fn)[2]:
1647 oldfn = fn
1647 oldfn = fn
1648 fn = lambda s, c, **kwargs: oldfn(s, c)
1648 fn = lambda s, c, **kwargs: oldfn(s, c)
1649 l.append((mf, fn, params))
1649 l.append((mf, fn, params))
1650 self._filterpats[filter] = l
1650 self._filterpats[filter] = l
1651 return self._filterpats[filter]
1651 return self._filterpats[filter]
1652
1652
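# The patterns come from the matching hgrc section; a hypothetical
# configuration piping text files through external filter commands:
#
#   [encode]
#   *.txt = dos2unix
#   [decode]
#   *.txt = unix2dos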
1653 def _filter(self, filterpats, filename, data):
1653 def _filter(self, filterpats, filename, data):
1654 for mf, fn, cmd in filterpats:
1654 for mf, fn, cmd in filterpats:
1655 if mf(filename):
1655 if mf(filename):
1656 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1656 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1657 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1657 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1658 break
1658 break
1659
1659
1660 return data
1660 return data
1661
1661
1662 @unfilteredpropertycache
1662 @unfilteredpropertycache
1663 def _encodefilterpats(self):
1663 def _encodefilterpats(self):
1664 return self._loadfilter('encode')
1664 return self._loadfilter('encode')
1665
1665
1666 @unfilteredpropertycache
1666 @unfilteredpropertycache
1667 def _decodefilterpats(self):
1667 def _decodefilterpats(self):
1668 return self._loadfilter('decode')
1668 return self._loadfilter('decode')
1669
1669
1670 def adddatafilter(self, name, filter):
1670 def adddatafilter(self, name, filter):
1671 self._datafilters[name] = filter
1671 self._datafilters[name] = filter
1672
1672
1673 def wread(self, filename):
1673 def wread(self, filename):
1674 if self.wvfs.islink(filename):
1674 if self.wvfs.islink(filename):
1675 data = self.wvfs.readlink(filename)
1675 data = self.wvfs.readlink(filename)
1676 else:
1676 else:
1677 data = self.wvfs.read(filename)
1677 data = self.wvfs.read(filename)
1678 return self._filter(self._encodefilterpats, filename, data)
1678 return self._filter(self._encodefilterpats, filename, data)
1679
1679
1680 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1680 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1681 """write ``data`` into ``filename`` in the working directory
1681 """write ``data`` into ``filename`` in the working directory
1682
1682
1683 This returns length of written (maybe decoded) data.
1683 This returns length of written (maybe decoded) data.
1684 """
1684 """
1685 data = self._filter(self._decodefilterpats, filename, data)
1685 data = self._filter(self._decodefilterpats, filename, data)
1686 if 'l' in flags:
1686 if 'l' in flags:
1687 self.wvfs.symlink(data, filename)
1687 self.wvfs.symlink(data, filename)
1688 else:
1688 else:
1689 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1689 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1690 **kwargs)
1690 **kwargs)
1691 if 'x' in flags:
1691 if 'x' in flags:
1692 self.wvfs.setflags(filename, False, True)
1692 self.wvfs.setflags(filename, False, True)
1693 else:
1693 else:
1694 self.wvfs.setflags(filename, False, False)
1694 self.wvfs.setflags(filename, False, False)
1695 return len(data)
1695 return len(data)
1696
1696
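# Example flag handling (hypothetical calls): '' writes a plain file,
# 'x' additionally sets the executable bit, 'l' writes a symlink:
#
#   repo.wwrite('hello.txt', 'data\n', '')
#   repo.wwrite('run.sh', '#!/bin/sh\n', 'x')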
1697 def wwritedata(self, filename, data):
1697 def wwritedata(self, filename, data):
1698 return self._filter(self._decodefilterpats, filename, data)
1698 return self._filter(self._decodefilterpats, filename, data)
1699
1699
1700 def currenttransaction(self):
1700 def currenttransaction(self):
1701 """return the current transaction or None if non exists"""
1701 """return the current transaction or None if non exists"""
1702 if self._transref:
1702 if self._transref:
1703 tr = self._transref()
1703 tr = self._transref()
1704 else:
1704 else:
1705 tr = None
1705 tr = None
1706
1706
1707 if tr and tr.running():
1707 if tr and tr.running():
1708 return tr
1708 return tr
1709 return None
1709 return None
1710
1710
1711 def transaction(self, desc, report=None):
1711 def transaction(self, desc, report=None):
1712 if (self.ui.configbool('devel', 'all-warnings')
1712 if (self.ui.configbool('devel', 'all-warnings')
1713 or self.ui.configbool('devel', 'check-locks')):
1713 or self.ui.configbool('devel', 'check-locks')):
1714 if self._currentlock(self._lockref) is None:
1714 if self._currentlock(self._lockref) is None:
1715 raise error.ProgrammingError('transaction requires locking')
1715 raise error.ProgrammingError('transaction requires locking')
1716 tr = self.currenttransaction()
1716 tr = self.currenttransaction()
1717 if tr is not None:
1717 if tr is not None:
1718 return tr.nest(name=desc)
1718 return tr.nest(name=desc)
1719
1719
1720 # abort here if the journal already exists
1720 # abort here if the journal already exists
1721 if self.svfs.exists("journal"):
1721 if self.svfs.exists("journal"):
1722 raise error.RepoError(
1722 raise error.RepoError(
1723 _("abandoned transaction found"),
1723 _("abandoned transaction found"),
1724 hint=_("run 'hg recover' to clean up transaction"))
1724 hint=_("run 'hg recover' to clean up transaction"))
1725
1725
1726 idbase = "%.40f#%f" % (random.random(), time.time())
1726 idbase = "%.40f#%f" % (random.random(), time.time())
1727 ha = hex(hashlib.sha1(idbase).digest())
1727 ha = hex(hashlib.sha1(idbase).digest())
1728 txnid = 'TXN:' + ha
1728 txnid = 'TXN:' + ha
1729 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1729 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1730
1730
1731 self._writejournal(desc)
1731 self._writejournal(desc)
1732 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1732 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1733 if report:
1733 if report:
1734 rp = report
1734 rp = report
1735 else:
1735 else:
1736 rp = self.ui.warn
1736 rp = self.ui.warn
1737 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1737 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1738 # we must avoid cyclic reference between repo and transaction.
1738 # we must avoid cyclic reference between repo and transaction.
1739 reporef = weakref.ref(self)
1739 reporef = weakref.ref(self)
1740 # Code to track tag movement
1740 # Code to track tag movement
1741 #
1741 #
1742 # Since tags are all handled as file content, it is actually quite hard
1742 # Since tags are all handled as file content, it is actually quite hard
1743 # to track these movements from a code perspective. So we fall back to
1743 # to track these movements from a code perspective. So we fall back to
1744 # tracking at the repository level. One could envision tracking changes
1744 # tracking at the repository level. One could envision tracking changes
1745 # to the '.hgtags' file through changegroup application, but that fails
1745 # to the '.hgtags' file through changegroup application, but that fails
1746 # to cope with cases where a transaction exposes new heads without a
1746 # to cope with cases where a transaction exposes new heads without a
1747 # changegroup being involved (e.g. phase movement).
1747 # changegroup being involved (e.g. phase movement).
1748 #
1748 #
1749 # For now, we gate the feature behind a flag since it likely comes
1749 # For now, we gate the feature behind a flag since it likely comes
1750 # with performance impacts. The current code runs more often than needed
1750 # with performance impacts. The current code runs more often than needed
1751 # and does not use caches as much as it could. The current focus is on
1751 # and does not use caches as much as it could. The current focus is on
1752 # the behavior of the feature, so we disable it by default. The flag
1752 # the behavior of the feature, so we disable it by default. The flag
1753 # will be removed when we are happy with the performance impact.
1753 # will be removed when we are happy with the performance impact.
1754 #
1754 #
1755 # Once this feature is no longer experimental, move the following
1755 # Once this feature is no longer experimental, move the following
1756 # documentation to the appropriate help section:
1756 # documentation to the appropriate help section:
1757 #
1757 #
1758 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1758 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1759 # tags (new or changed or deleted tags). In addition the details of
1759 # tags (new or changed or deleted tags). In addition the details of
1760 # these changes are made available in a file at:
1760 # these changes are made available in a file at:
1761 # ``REPOROOT/.hg/changes/tags.changes``.
1761 # ``REPOROOT/.hg/changes/tags.changes``.
1762 # Make sure you check for HG_TAG_MOVED before reading that file as it
1762 # Make sure you check for HG_TAG_MOVED before reading that file as it
1763 # might exist from a previous transaction even if no tags were touched
1763 # might exist from a previous transaction even if no tags were touched
1764 # in this one. Changes are recorded in a line-based format::
1764 # in this one. Changes are recorded in a line-based format::
1765 #
1765 #
1766 # <action> <hex-node> <tag-name>\n
1766 # <action> <hex-node> <tag-name>\n
1767 #
1767 #
1768 # Actions are defined as follows:
1768 # Actions are defined as follows:
1769 # "-R": tag is removed,
1769 # "-R": tag is removed,
1770 # "+A": tag is added,
1770 # "+A": tag is added,
1771 # "-M": tag is moved (old value),
1771 # "-M": tag is moved (old value),
1772 # "+M": tag is moved (new value),
1772 # "+M": tag is moved (new value),
1773 tracktags = lambda x: None
1773 tracktags = lambda x: None
1774 # experimental config: experimental.hook-track-tags
1774 # experimental config: experimental.hook-track-tags
1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1776 if desc != 'strip' and shouldtracktags:
1776 if desc != 'strip' and shouldtracktags:
1777 oldheads = self.changelog.headrevs()
1777 oldheads = self.changelog.headrevs()
1778 def tracktags(tr2):
1778 def tracktags(tr2):
1779 repo = reporef()
1779 repo = reporef()
1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1781 newheads = repo.changelog.headrevs()
1781 newheads = repo.changelog.headrevs()
1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1783 # note: we compare lists here.
1783 # note: we compare lists here.
1784 # As we do it only once, building a set would not be cheaper.
1784 # As we do it only once, building a set would not be cheaper.
1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1786 if changes:
1786 if changes:
1787 tr2.hookargs['tag_moved'] = '1'
1787 tr2.hookargs['tag_moved'] = '1'
1788 with repo.vfs('changes/tags.changes', 'w',
1788 with repo.vfs('changes/tags.changes', 'w',
1789 atomictemp=True) as changesfile:
1789 atomictemp=True) as changesfile:
1790 # note: we do not register the file with the transaction
1790 # note: we do not register the file with the transaction
1791 # because we need it to still exist when the transaction
1791 # because we need it to still exist when the transaction
1792 # is closed (for txnclose hooks)
1792 # is closed (for txnclose hooks)
1793 tagsmod.writediff(changesfile, changes)
1793 tagsmod.writediff(changesfile, changes)
1794 def validate(tr2):
1794 def validate(tr2):
1795 """will run pre-closing hooks"""
1795 """will run pre-closing hooks"""
1796 # XXX the transaction API is a bit lacking here so we take a hacky
1796 # XXX the transaction API is a bit lacking here so we take a hacky
1797 # path for now
1797 # path for now
1798 #
1798 #
1799 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1799 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1800 # dict is copied before these run. In addition, we need the data
1800 # dict is copied before these run. In addition, we need the data
1801 # available to in-memory hooks too.
1801 # available to in-memory hooks too.
1802 #
1802 #
1803 # Moreover, we also need to make sure this runs before txnclose
1803 # Moreover, we also need to make sure this runs before txnclose
1804 # hooks and there is no "pending" mechanism that would execute
1804 # hooks and there is no "pending" mechanism that would execute
1805 # logic only if hooks are about to run.
1805 # logic only if hooks are about to run.
1806 #
1806 #
1807 # Fixing this limitation of the transaction is also needed to track
1807 # Fixing this limitation of the transaction is also needed to track
1808 # other families of changes (bookmarks, phases, obsolescence).
1808 # other families of changes (bookmarks, phases, obsolescence).
1809 #
1809 #
1810 # This will have to be fixed before we remove the experimental
1810 # This will have to be fixed before we remove the experimental
1811 # gating.
1811 # gating.
1812 tracktags(tr2)
1812 tracktags(tr2)
1813 repo = reporef()
1813 repo = reporef()
1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1815 scmutil.enforcesinglehead(repo, tr2, desc)
1815 scmutil.enforcesinglehead(repo, tr2, desc)
1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1818 args = tr.hookargs.copy()
1818 args = tr.hookargs.copy()
1819 args.update(bookmarks.preparehookargs(name, old, new))
1819 args.update(bookmarks.preparehookargs(name, old, new))
1820 repo.hook('pretxnclose-bookmark', throw=True,
1820 repo.hook('pretxnclose-bookmark', throw=True,
1821 **pycompat.strkwargs(args))
1821 **pycompat.strkwargs(args))
1822 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1822 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1823 cl = repo.unfiltered().changelog
1823 cl = repo.unfiltered().changelog
1824 for rev, (old, new) in tr.changes['phases'].items():
1824 for rev, (old, new) in tr.changes['phases'].items():
1825 args = tr.hookargs.copy()
1825 args = tr.hookargs.copy()
1826 node = hex(cl.node(rev))
1826 node = hex(cl.node(rev))
1827 args.update(phases.preparehookargs(node, old, new))
1827 args.update(phases.preparehookargs(node, old, new))
1828 repo.hook('pretxnclose-phase', throw=True,
1828 repo.hook('pretxnclose-phase', throw=True,
1829 **pycompat.strkwargs(args))
1829 **pycompat.strkwargs(args))
1830
1830
1831 repo.hook('pretxnclose', throw=True,
1831 repo.hook('pretxnclose', throw=True,
1832 **pycompat.strkwargs(tr.hookargs))
1832 **pycompat.strkwargs(tr.hookargs))
1833 def releasefn(tr, success):
1833 def releasefn(tr, success):
1834 repo = reporef()
1834 repo = reporef()
1835 if success:
1835 if success:
1836 # this should be explicitly invoked here, because
1836 # this should be explicitly invoked here, because
1837 # in-memory changes aren't written out when closing the
1837 # in-memory changes aren't written out when closing the
1838 # transaction if tr.addfilegenerator (via
1838 # transaction if tr.addfilegenerator (via
1839 # dirstate.write or so) wasn't invoked while the
1839 # dirstate.write or so) wasn't invoked while the
1840 # transaction was running
1840 # transaction was running
1841 repo.dirstate.write(None)
1841 repo.dirstate.write(None)
1842 else:
1842 else:
1843 # discard all changes (including ones already written
1843 # discard all changes (including ones already written
1844 # out) in this transaction
1844 # out) in this transaction
1845 narrowspec.restorebackup(self, 'journal.narrowspec')
1845 narrowspec.restorebackup(self, 'journal.narrowspec')
1846 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1846 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1847 repo.dirstate.restorebackup(None, 'journal.dirstate')
1847 repo.dirstate.restorebackup(None, 'journal.dirstate')
1848
1848
1849 repo.invalidate(clearfilecache=True)
1849 repo.invalidate(clearfilecache=True)
1850
1850
1851 tr = transaction.transaction(rp, self.svfs, vfsmap,
1851 tr = transaction.transaction(rp, self.svfs, vfsmap,
1852 "journal",
1852 "journal",
1853 "undo",
1853 "undo",
1854 aftertrans(renames),
1854 aftertrans(renames),
1855 self.store.createmode,
1855 self.store.createmode,
1856 validator=validate,
1856 validator=validate,
1857 releasefn=releasefn,
1857 releasefn=releasefn,
1858 checkambigfiles=_cachedfiles,
1858 checkambigfiles=_cachedfiles,
1859 name=desc)
1859 name=desc)
1860 tr.changes['origrepolen'] = len(self)
1860 tr.changes['origrepolen'] = len(self)
1861 tr.changes['obsmarkers'] = set()
1861 tr.changes['obsmarkers'] = set()
1862 tr.changes['phases'] = {}
1862 tr.changes['phases'] = {}
1863 tr.changes['bookmarks'] = {}
1863 tr.changes['bookmarks'] = {}
1864
1864
1865 tr.hookargs['txnid'] = txnid
1865 tr.hookargs['txnid'] = txnid
1866 tr.hookargs['txnname'] = desc
1866 tr.hookargs['txnname'] = desc
1867 # note: writing the fncache only during finalize means that the file is
1867 # note: writing the fncache only during finalize means that the file is
1868 # outdated when running hooks. As fncache is used for streaming clones,
1868 # outdated when running hooks. As fncache is used for streaming clones,
1869 # this is not expected to break anything that happens during the hooks.
1869 # this is not expected to break anything that happens during the hooks.
1870 tr.addfinalize('flush-fncache', self.store.write)
1870 tr.addfinalize('flush-fncache', self.store.write)
1871 def txnclosehook(tr2):
1871 def txnclosehook(tr2):
1872 """To be run if transaction is successful, will schedule a hook run
1872 """To be run if transaction is successful, will schedule a hook run
1873 """
1873 """
1874 # Don't reference tr2 in hook() so we don't hold a reference.
1874 # Don't reference tr2 in hook() so we don't hold a reference.
1875 # This reduces memory consumption when there are multiple
1875 # This reduces memory consumption when there are multiple
1876 # transactions per lock. This can likely go away if issue5045
1876 # transactions per lock. This can likely go away if issue5045
1877 # fixes the function accumulation.
1877 # fixes the function accumulation.
1878 hookargs = tr2.hookargs
1878 hookargs = tr2.hookargs
1879
1879
1880 def hookfunc():
1880 def hookfunc():
1881 repo = reporef()
1881 repo = reporef()
1882 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1882 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1883 bmchanges = sorted(tr.changes['bookmarks'].items())
1883 bmchanges = sorted(tr.changes['bookmarks'].items())
1884 for name, (old, new) in bmchanges:
1884 for name, (old, new) in bmchanges:
1885 args = tr.hookargs.copy()
1885 args = tr.hookargs.copy()
1886 args.update(bookmarks.preparehookargs(name, old, new))
1886 args.update(bookmarks.preparehookargs(name, old, new))
1887 repo.hook('txnclose-bookmark', throw=False,
1887 repo.hook('txnclose-bookmark', throw=False,
1888 **pycompat.strkwargs(args))
1888 **pycompat.strkwargs(args))
1889
1889
1890 if hook.hashook(repo.ui, 'txnclose-phase'):
1890 if hook.hashook(repo.ui, 'txnclose-phase'):
1891 cl = repo.unfiltered().changelog
1891 cl = repo.unfiltered().changelog
1892 phasemv = sorted(tr.changes['phases'].items())
1892 phasemv = sorted(tr.changes['phases'].items())
1893 for rev, (old, new) in phasemv:
1893 for rev, (old, new) in phasemv:
1894 args = tr.hookargs.copy()
1894 args = tr.hookargs.copy()
1895 node = hex(cl.node(rev))
1895 node = hex(cl.node(rev))
1896 args.update(phases.preparehookargs(node, old, new))
1896 args.update(phases.preparehookargs(node, old, new))
1897 repo.hook('txnclose-phase', throw=False,
1897 repo.hook('txnclose-phase', throw=False,
1898 **pycompat.strkwargs(args))
1898 **pycompat.strkwargs(args))
1899
1899
1900 repo.hook('txnclose', throw=False,
1900 repo.hook('txnclose', throw=False,
1901 **pycompat.strkwargs(hookargs))
1901 **pycompat.strkwargs(hookargs))
1902 reporef()._afterlock(hookfunc)
1902 reporef()._afterlock(hookfunc)
1903 tr.addfinalize('txnclose-hook', txnclosehook)
1903 tr.addfinalize('txnclose-hook', txnclosehook)
1904 # Include a leading "-" to make it happen before the transaction summary
1904 # Include a leading "-" to make it happen before the transaction summary
1905 # reports registered via scmutil.registersummarycallback() whose names
1905 # reports registered via scmutil.registersummarycallback() whose names
1906 # are 00-txnreport etc. That way, the caches will be warm when the
1906 # are 00-txnreport etc. That way, the caches will be warm when the
1907 # callbacks run.
1907 # callbacks run.
1908 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1908 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1909 def txnaborthook(tr2):
1909 def txnaborthook(tr2):
1910 """To be run if transaction is aborted
1910 """To be run if transaction is aborted
1911 """
1911 """
1912 reporef().hook('txnabort', throw=False,
1912 reporef().hook('txnabort', throw=False,
1913 **pycompat.strkwargs(tr2.hookargs))
1913 **pycompat.strkwargs(tr2.hookargs))
1914 tr.addabort('txnabort-hook', txnaborthook)
1914 tr.addabort('txnabort-hook', txnaborthook)
1915 # avoid eager cache invalidation. in-memory data should be identical
1915 # avoid eager cache invalidation. in-memory data should be identical
1916 # to stored data if transaction has no error.
1916 # to stored data if transaction has no error.
1917 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1917 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1918 self._transref = weakref.ref(tr)
1918 self._transref = weakref.ref(tr)
1919 scmutil.registersummarycallback(self, tr, desc)
1919 scmutil.registersummarycallback(self, tr, desc)
1920 return tr
1920 return tr
1921
1921
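    # A minimal usage sketch (hypothetical caller code, not part of this
    # module): callers take the locks first, then open the transaction, and
    # rely on the context manager to close or abort it. The transaction name
    # 'example' is illustrative only.
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction('example') as tr:
    #             ...  # mutate the store; 'txnclose' hooks fire on success,
    #                  # 'txnabort' on error
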
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

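    # Sketch of how recover() is typically driven (hypothetical caller code):
    # it only undoes an *interrupted* transaction, i.e. a leftover 'journal';
    # completed transactions are undone via rollback() instead.
    #
    #     if repo.recover():
    #         repo.ui.status('interrupted transaction rolled back\n')
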
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

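    # Hypothetical usage sketch: a dry run reports what would be undone
    # without touching the store, while force skips the data-loss safety
    # check in _rollback() below.
    #
    #     repo.rollback(dryrun=True)   # only prints the "rolled back to ..." message
    #     repo.rollback(force=True)    # really undo, even a checked-out commit
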
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

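    # Illustrative sketch (assumed caller code, not from this file): after an
    # out-of-band change, a full cache warm-up can be requested explicitly.
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)  # branchmap, revbranchcache, tags, ...
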
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

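    # Sketch of the _afterlock() contract (hypothetical callback): the
    # callback runs once the outermost held lock is released, or immediately
    # if no lock is held at registration time.
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)
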
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

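    # The documented lock ordering, as a sketch: always take 'wlock' before
    # 'lock'; reversing the order can trigger a devel warning (see wlock()
    # above) and risks deadlock against other processes.
    #
    #     with repo.wlock():      # non-store parts of .hg
    #         with repo.lock():   # .hg/store
    #             ...
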
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

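    # For illustration (a sketch of the resulting data, not a quote of the
    # filelog format docs): a rename recorded by _filecommit() above ends up
    # as filelog metadata on a revision whose first parent is nullid, e.g.
    #
    #     meta = {"copy": "foo", "copyrev": "<40-hex filelog node>"}
    #
    # so readers recover the copy source from the metadata rather than from
    # the revision's parents.
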
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save the commit message in case this transaction gets rolled
            # back (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
                # hack for commands that use a temporary commit (e.g. histedit):
                # the temporary commit may already be stripped by the time the
                # hook runs
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=node, parent1=parent1,
                              parent2=parent2)
            self._afterlock(commithook)
            return ret

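    # Hypothetical caller sketch: commit everything modified in the working
    # directory; commit() returns None when nothing would change and no empty
    # commit is allowed.
    #
    #     node = repo.commit(text='fix a bug',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
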
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entries so that
                    # the exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to it each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

2704 def postdsstatus(self):
2704 def postdsstatus(self):
2705 """Used by workingctx to get the list of post-dirstate-status hooks."""
2705 """Used by workingctx to get the list of post-dirstate-status hooks."""
2706 return self._postdsstatus
2706 return self._postdsstatus
2707
2707
2708 def clearpostdsstatus(self):
2708 def clearpostdsstatus(self):
2709 """Used by workingctx to clear post-dirstate-status hooks."""
2709 """Used by workingctx to clear post-dirstate-status hooks."""
2710 del self._postdsstatus[:]
2710 del self._postdsstatus[:]
2711
2711
2712 def heads(self, start=None):
2712 def heads(self, start=None):
2713 if start is None:
2713 if start is None:
2714 cl = self.changelog
2714 cl = self.changelog
2715 headrevs = reversed(cl.headrevs())
2715 headrevs = reversed(cl.headrevs())
2716 return [cl.node(rev) for rev in headrevs]
2716 return [cl.node(rev) for rev in headrevs]
2717
2717
2718 heads = self.changelog.heads(start)
2718 heads = self.changelog.heads(start)
2719 # sort the output in rev descending order
2719 # sort the output in rev descending order
2720 return sorted(heads, key=self.changelog.rev, reverse=True)
2720 return sorted(heads, key=self.changelog.rev, reverse=True)
2721
2721
2722 def branchheads(self, branch=None, start=None, closed=False):
2722 def branchheads(self, branch=None, start=None, closed=False):
2723 '''return a (possibly filtered) list of heads for the given branch
2723 '''return a (possibly filtered) list of heads for the given branch
2724
2724
2725 Heads are returned in topological order, from newest to oldest.
2725 Heads are returned in topological order, from newest to oldest.
2726 If branch is None, use the dirstate branch.
2726 If branch is None, use the dirstate branch.
2727 If start is not None, return only heads reachable from start.
2727 If start is not None, return only heads reachable from start.
2728 If closed is True, return heads that are marked as closed as well.
2728 If closed is True, return heads that are marked as closed as well.
2729 '''
2729 '''
2730 if branch is None:
2730 if branch is None:
2731 branch = self[None].branch()
2731 branch = self[None].branch()
2732 branches = self.branchmap()
2732 branches = self.branchmap()
2733 if branch not in branches:
2733 if branch not in branches.entries:
2734 return []
2734 return []
2735 # the cache returns heads ordered lowest to highest
2735 # the cache returns heads ordered lowest to highest
2736 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2736 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2737 if start is not None:
2737 if start is not None:
2738 # filter out the heads that cannot be reached from startrev
2738 # filter out the heads that cannot be reached from startrev
2739 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2739 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2740 bheads = [h for h in bheads if h in fbheads]
2740 bheads = [h for h in bheads if h in fbheads]
2741 return bheads
2741 return bheads
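
As a hedged usage sketch (not part of the module), assuming `repo` is an
open localrepository and 'default' an existing branch name:

from mercurial.node import hex

# Hypothetical sketch: heads come back newest-first as binary nodes.
for node in repo.branchheads('default', closed=True):
    print(hex(node))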

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
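
As a hedged illustration of the return shape (not part of the module):
branches() walks each requested node down its first parents until it hits
a merge or a root, and reports where it stopped.

# Hypothetical sketch; `repo` is assumed to be an open localrepository.
# Each entry is (tip, stop, p1, p2): the walk starts at `tip`, follows
# first parents, and stops at `stop`, the first merge or root reached.
for tip, stop, p1, p2 in repo.branches([repo.changelog.tip()]):
    pass  # e.g. inspect the linear run from `tip` down to `stop`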

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
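
between() samples the first-parent chain from top toward bottom at
exponentially spaced distances (1, 2, 4, 8, ... steps), which keeps
wire-protocol ancestor discovery logarithmic. A standalone sketch of the
same sampling on plain integers, with a lambda standing in for changelog
parent lookup:

def sample_between(parent, top, bottom):
    # mirror of the loop above: pick the node i steps below top
    # whenever i hits the next power of two (1, 2, 4, 8, ...)
    n, picked, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parent(n)
        if i == f:
            picked.append(n)
            f *= 2
        n = p
        i += 1
    return picked

# on the linear chain 10 -> 9 -> ... -> 0, sampling from 10 down to 0
# picks the nodes 1, 2, 4 and 8 steps below the top: [9, 8, 6, 2]
print(sample_between(lambda x: x - 1 if x > 0 else None, 10, 0))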

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance; its hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
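
pushkey()/listkeys() implement Mercurial's generic key-value wire protocol;
bookmarks and phases are exposed as pushkey namespaces. A hedged sketch of
moving a bookmark through it (`repo` and `newhexnode` are assumptions, with
nodes as hex strings as the bookmarks namespace expects):

# Hypothetical sketch; `newhexnode` is a 40-char hex node supplied elsewhere.
marks = repo.listkeys('bookmarks')      # {bookmark name: hex node}
old = marks.get(b'@', b'')              # b'' means "bookmark absent"
ok = repo.pushkey('bookmarks', b'@', old, newhexnode)
# ok is False if a prepushkey hook aborted the update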

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
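
aftertrans() returns a plain closure precisely so the transaction object
doesn't keep a repo reference alive (see the comment above). A hedged
sketch of the calling pattern, with `svfs` assumed to be a vfs instance
rooted at .hg/store:

# Hypothetical sketch: build the post-transaction rename callback, run it.
finisher = aftertrans([(svfs, 'journal', 'undo'),
                       (svfs, 'journal.phaseroots', 'undo.phaseroots')])
finisher()  # renames each journal file to its undo name; a missing
            # journal file is tolerated (OSError is swallowed)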

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
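
undoname() just rewrites the leading 'journal' component of a file name;
for instance:

# only the first occurrence of 'journal' in the basename is replaced
undoname('.hg/store/journal')             # -> '.hg/store/undo'
undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'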

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts
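
A hedged sketch of the fill-in behavior; the 'revlogv1' value assumes the
stock default for the experimental storage.new-repo-backend config:

# Hypothetical sketch: explicit keys are kept, missing ones populated.
opts = defaultcreateopts(ui, createopts={'lfs': True})
assert opts == {'lfs': True, 'backend': 'revlogv1'}  # assuming defaults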

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements
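
Under stock configuration (usestore, usefncache and dotencode all default
to enabled, zlib compression, no experimental formats requested) the
result should contain at least the classic requirements; a hedged sketch:

# Hypothetical sketch of the default outcome with stock config:
reqs = newreporequirements(ui, defaultcreateopts(ui))
assert {'revlogv1', 'store', 'fncache', 'dotencode'} <= reqs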

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
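
Only keys the loaded code doesn't recognize come back, and
createrepository() treats any survivor as fatal; a hedged sketch
(`frobnicate` is a made-up option):

leftover = filterknowncreateopts(ui, {'backend': 'revlogv1',
                                      'frobnicate': True})
assert leftover == {'frobnicate': True}  # would make createrepository() abort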

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
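
A minimal end-to-end sketch, assuming a configured `ui` object and a bytes
path (per Mercurial convention):

# Hypothetical sketch: create a fresh repository, then open it.
createrepository(ui, b'/tmp/newrepo', createopts={'lfs': True})
repo = makelocalrepository(ui, b'/tmp/newrepo')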

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
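
The class-swap trick is self-contained Python; a standalone sketch of the
same poisoning pattern outside Mercurial:

# Standalone sketch of the poisoning pattern used above.
class _Poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('instance used after poisoning')
    def close(self):
        pass

class Victim(object):
    def close(self):
        print('closing')

v = Victim()
object.__setattr__(v, '__class__', _Poisoned)
v.close()      # still allowed
# v.anything   # would raise RuntimeError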