copies: prepare changelog for more copies storage mode...
marmoute - r43296:0b87eb2f default
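
This changeset threads a new `copies-storage` opener option through the changelog: `changelog.__init__` records it as `self._copiesstorage`, and `changelog.add()` now only prepares the changeset `extra` dict for copy metadata when that mode is `'extra'`. The copy metadata itself uses the index-based encoding implemented by `encodecopies()`/`decodecopies()` in the diff below. A minimal standalone sketch of that encoding (re-implemented here for illustration, not imported from Mercurial):

```python
# Sketch of the copy-metadata encoding used by encodecopies()/decodecopies()
# below: each entry is "<index-into-sorted-file-list>\0<copy-source>",
# one entry per line.
def encode_copies(files, copies):
    items = ['%d\0%s' % (i, copies[dst])
             for i, dst in enumerate(files) if dst in copies]
    if len(items) != len(copies):
        raise ValueError('some copy targets missing from file list')
    return '\n'.join(items)

def decode_copies(files, data):
    if not data:
        return {}
    return {files[int(idx)]: src
            for idx, src in (line.split('\0') for line in data.split('\n'))}

files = sorted(['copied.txt', 'new.txt'])   # files touched by the commit
meta = encode_copies(files, {'copied.txt': 'original.txt'})
assert meta == '0\x00original.txt'
assert decode_copies(files, meta) == {'copied.txt': 'original.txt'}
```

Storing indices into the commit's sorted file list keeps the encoded value compact, and decoding deliberately returns None on malformed data in case an unrelated tool reused the same extra key.
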
diff --git a/mercurial/changelog.py b/mercurial/changelog.py
@@ -1,672 +1,674 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
 )
 from .thirdparty import (
     attr,
 )

 from . import (
     encoding,
     error,
     pycompat,
     revlog,
     util,
 )
 from .utils import (
     dateutil,
     stringutil,
 )

 _defaultextra = {'branch': 'default'}

 def _string_escape(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
     >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
     >>> s
     'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
     >>> s == _string_unescape(res)
     True
     """
     # subset of the string_escape codec
     text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
     return text.replace('\0', '\\0')

 def _string_unescape(text):
     if '\\0' in text:
         # fix up \0 without getting into trouble with \\0
         text = text.replace('\\\\', '\\\\\n')
         text = text.replace('\\0', '\0')
         text = text.replace('\n', '')
     return stringutil.unescapestr(text)

 def decodeextra(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
     ...                                 b'baz': chr(92) + chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
     for l in text.split('\0'):
         if l:
             k, v = _string_unescape(l).split(':', 1)
             extra[k] = v
     return extra

 def encodeextra(d):
     # keys must be sorted to produce a deterministic changelog entry
     items = [
         _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
         for k in sorted(d)
     ]
     return "\0".join(items)

 def encodecopies(files, copies):
     items = []
     for i, dst in enumerate(files):
         if dst in copies:
             items.append('%d\0%s' % (i, copies[dst]))
     if len(items) != len(copies):
         raise error.ProgrammingError('some copy targets missing from file list')
     return "\n".join(items)

 def decodecopies(files, data):
     try:
         copies = {}
         if not data:
             return copies
         for l in data.split('\n'):
             strindex, src = l.split('\0')
             i = int(strindex)
             dst = files[i]
             copies[dst] = src
         return copies
     except (ValueError, IndexError):
         # Perhaps someone had chosen the same key name (e.g. "p1copies") and
         # used different syntax for the value.
         return None

 def encodefileindices(files, subset):
     subset = set(subset)
     indices = []
     for i, f in enumerate(files):
         if f in subset:
             indices.append('%d' % i)
     return '\n'.join(indices)

 def decodefileindices(files, data):
     try:
         subset = []
         if not data:
             return subset
         for strindex in data.split('\n'):
             i = int(strindex)
             if i < 0 or i >= len(files):
                 return None
             subset.append(files[i])
         return subset
     except (ValueError, IndexError):
         # Perhaps someone had chosen the same key name (e.g. "added") and
         # used different syntax for the value.
         return None

 def stripdesc(desc):
     """strip trailing whitespace and leading and trailing empty lines"""
     return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')

 class appender(object):
     '''the changelog index must be updated last on disk, so we use this class
     to delay writes to it'''
     def __init__(self, vfs, name, mode, buf):
         self.data = buf
         fp = vfs(name, mode)
         self.fp = fp
         self.offset = fp.tell()
         self.size = vfs.fstat(fp).st_size
         self._end = self.size

     def end(self):
         return self._end
     def tell(self):
         return self.offset
     def flush(self):
         pass

     @property
     def closed(self):
         return self.fp.closed

     def close(self):
         self.fp.close()

     def seek(self, offset, whence=0):
         '''virtual file offset spans real file and data'''
         if whence == 0:
             self.offset = offset
         elif whence == 1:
             self.offset += offset
         elif whence == 2:
             self.offset = self.end() + offset
         if self.offset < self.size:
             self.fp.seek(self.offset)

     def read(self, count=-1):
         '''only trick here is reads that span real file and data'''
         ret = ""
         if self.offset < self.size:
             s = self.fp.read(count)
             ret = s
             self.offset += len(s)
             if count > 0:
                 count -= len(s)
         if count != 0:
             doff = self.offset - self.size
             self.data.insert(0, "".join(self.data))
             del self.data[1:]
             s = self.data[0][doff:doff + count]
             self.offset += len(s)
             ret += s
         return ret

     def write(self, s):
         self.data.append(bytes(s))
         self.offset += len(s)
         self._end += len(s)

     def __enter__(self):
         self.fp.__enter__()
         return self

     def __exit__(self, *args):
         return self.fp.__exit__(*args)

 def _divertopener(opener, target):
     """build an opener that writes in 'target.a' instead of 'target'"""
     def _divert(name, mode='r', checkambig=False):
         if name != target:
             return opener(name, mode)
         return opener(name + ".a", mode)
     return _divert

 def _delayopener(opener, target, buf):
     """build an opener that stores chunks in 'buf' instead of 'target'"""
     def _delay(name, mode='r', checkambig=False):
         if name != target:
             return opener(name, mode)
         return appender(opener, name, mode, buf)
     return _delay

 @attr.s
 class _changelogrevision(object):
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
     manifest = attr.ib(default=nullid)
     user = attr.ib(default='')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
     filesadded = attr.ib(default=None)
     filesremoved = attr.ib(default=None)
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
     description = attr.ib(default='')

 class changelogrevision(object):
     """Holds results of a parsed changelog revision.

     Changelog revisions consist of multiple pieces of data, including
     the manifest node, user, and date. This object exposes a view into
     the parsed object.
     """

     __slots__ = (
         r'_offsets',
         r'_text',
     )

     def __new__(cls, text):
         if not text:
             return _changelogrevision(extra=_defaultextra)

         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
         # But doing it here is equivalent and saves an extra function call.

         # format used:
         # nodeid\n        : manifest node in ascii
         # user\n          : user, no \n or \r allowed
         # time tz extra\n : date (time is int or float, timezone is int)
         #                 : extra is metadata, encoded and separated by '\0'
         #                 : older versions ignore it
         # files\n\n       : files modified by the cset, no \n or \r allowed
         # (.*)            : comment (free text, ideally utf-8)
         #
         # changelog v0 doesn't use extra

         nl1 = text.index('\n')
         nl2 = text.index('\n', nl1 + 1)
         nl3 = text.index('\n', nl2 + 1)

         # The list of files may be empty. Which means nl3 is the first of the
         # double newline that precedes the description.
         if text[nl3 + 1:nl3 + 2] == '\n':
             doublenl = nl3
         else:
             doublenl = text.index('\n\n', nl3 + 1)

         self._offsets = (nl1, nl2, nl3, doublenl)
         self._text = text

         return self

     @property
     def manifest(self):
         return bin(self._text[0:self._offsets[0]])

     @property
     def user(self):
         off = self._offsets
         return encoding.tolocal(self._text[off[0] + 1:off[1]])

     @property
     def _rawdate(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1:off[2]]
         return dateextra.split(' ', 2)[0:2]

     @property
     def _rawextra(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1:off[2]]
         fields = dateextra.split(' ', 2)
         if len(fields) != 3:
             return None

         return fields[2]

     @property
     def date(self):
         raw = self._rawdate
         time = float(raw[0])
         # Various tools did silly things with the timezone.
         try:
             timezone = int(raw[1])
         except ValueError:
             timezone = 0

         return time, timezone

     @property
     def extra(self):
         raw = self._rawextra
         if raw is None:
             return _defaultextra

         return decodeextra(raw)

     @property
     def files(self):
         off = self._offsets
         if off[2] == off[3]:
             return []

         return self._text[off[2] + 1:off[3]].split('\n')

     @property
     def filesadded(self):
         rawindices = self.extra.get('filesadded')
         return rawindices and decodefileindices(self.files, rawindices)

     @property
     def filesremoved(self):
         rawindices = self.extra.get('filesremoved')
         return rawindices and decodefileindices(self.files, rawindices)

     @property
     def p1copies(self):
         rawcopies = self.extra.get('p1copies')
         return rawcopies and decodecopies(self.files, rawcopies)

     @property
     def p2copies(self):
         rawcopies = self.extra.get('p2copies')
         return rawcopies and decodecopies(self.files, rawcopies)

     @property
     def description(self):
         return encoding.tolocal(self._text[self._offsets[3] + 2:])

 class changelog(revlog.revlog):
     def __init__(self, opener, trypending=False):
         """Load a changelog revlog using an opener.

         If ``trypending`` is true, we attempt to load the index from a
         ``00changelog.i.a`` file instead of the default ``00changelog.i``.
         The ``00changelog.i.a`` file contains index (and possibly inline
         revision) data for a transaction that hasn't been finalized yet.
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.
         """
         if trypending and opener.exists('00changelog.i.a'):
             indexfile = '00changelog.i.a'
         else:
             indexfile = '00changelog.i'

         datafile = '00changelog.d'
         revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
                                checkambig=True, mmaplargeindex=True)

         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.

             self.version &= ~revlog.FLAG_GENERALDELTA
             self._generaldelta = False

         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each other. So disable
         # delta chains.
         self._storedeltachains = False

         self._realopener = opener
         self._delayed = False
         self._delaybuf = None
         self._divert = False
         self.filteredrevs = frozenset()
+        self._copiesstorage = opener.options.get('copies-storage')

     def tiprev(self):
         for i in pycompat.xrange(len(self) -1, -2, -1):
             if i not in self.filteredrevs:
                 return i

     def tip(self):
         """filtered version of revlog.tip"""
         return self.node(self.tiprev())

     def __contains__(self, rev):
         """filtered version of revlog.__contains__"""
         return (0 <= rev < len(self)
                 and rev not in self.filteredrevs)

     def __iter__(self):
         """filtered version of revlog.__iter__"""
         if len(self.filteredrevs) == 0:
             return revlog.revlog.__iter__(self)

         def filterediter():
             for i in pycompat.xrange(len(self)):
                 if i not in self.filteredrevs:
                     yield i

         return filterediter()

     def revs(self, start=0, stop=None):
         """filtered version of revlog.revs"""
         for i in super(changelog, self).revs(start, stop):
             if i not in self.filteredrevs:
                 yield i

     def _checknofilteredinrevs(self, revs):
         """raise the appropriate error if 'revs' contains a filtered revision

         This returns a version of 'revs' to be used thereafter by the caller.
         In particular, if revs is an iterator, it is converted into a set.
         """
         safehasattr = util.safehasattr
         if safehasattr(revs, '__next__'):
             # Note that inspect.isgenerator() is not true for iterators.
             revs = set(revs)

         filteredrevs = self.filteredrevs
         if safehasattr(revs, 'first'): # smartset
             offenders = revs & filteredrevs
         else:
             offenders = filteredrevs.intersection(revs)

         for rev in offenders:
             raise error.FilteredIndexError(rev)
         return revs

     def headrevs(self, revs=None):
         if revs is None and self.filteredrevs:
             try:
                 return self.index.headrevsfiltered(self.filteredrevs)
             # AttributeError covers non-c-extension environments and
             # old c extensions without filter handling.
             except AttributeError:
                 return self._headrevs()

         if self.filteredrevs:
             revs = self._checknofilteredinrevs(revs)
         return super(changelog, self).headrevs(revs)

     def strip(self, *args, **kwargs):
         # XXX make something better than assert
         # We can't expect proper strip behavior if we are filtered.
         assert not self.filteredrevs
         super(changelog, self).strip(*args, **kwargs)

     def rev(self, node):
         """filtered version of revlog.rev"""
         r = super(changelog, self).rev(node)
         if r in self.filteredrevs:
             raise error.FilteredLookupError(hex(node), self.indexfile,
                                             _('filtered node'))
         return r

     def node(self, rev):
         """filtered version of revlog.node"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).node(rev)

     def linkrev(self, rev):
         """filtered version of revlog.linkrev"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).linkrev(rev)

     def parentrevs(self, rev):
         """filtered version of revlog.parentrevs"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).parentrevs(rev)

     def flags(self, rev):
         """filtered version of revlog.flags"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).flags(rev)

     def delayupdate(self, tr):
         "delay visibility of index updates to other readers"

         if not self._delayed:
             if len(self) == 0:
                 self._divert = True
                 if self._realopener.exists(self.indexfile + '.a'):
                     self._realopener.unlink(self.indexfile + '.a')
                 self.opener = _divertopener(self._realopener, self.indexfile)
             else:
                 self._delaybuf = []
                 self.opener = _delayopener(self._realopener, self.indexfile,
                                            self._delaybuf)
         self._delayed = True
         tr.addpending('cl-%i' % id(self), self._writepending)
         tr.addfinalize('cl-%i' % id(self), self._finalize)

     def _finalize(self, tr):
         "finalize index updates"
         self._delayed = False
         self.opener = self._realopener
         # move redirected index data back into place
         if self._divert:
             assert not self._delaybuf
             tmpname = self.indexfile + ".a"
             nfile = self.opener.open(tmpname)
             nfile.close()
             self.opener.rename(tmpname, self.indexfile, checkambig=True)
         elif self._delaybuf:
             fp = self.opener(self.indexfile, 'a', checkambig=True)
             fp.write("".join(self._delaybuf))
             fp.close()
             self._delaybuf = None
         self._divert = False
         # split when we're done
         self._enforceinlinesize(tr)

     def _writepending(self, tr):
         "create a file containing the unfinalized state for pretxnchangegroup"
         if self._delaybuf:
             # make a temporary copy of the index
             fp1 = self._realopener(self.indexfile)
             pendingfilename = self.indexfile + ".a"
             # register as a temp file to ensure cleanup on failure
             tr.registertmp(pendingfilename)
             # write existing data
             fp2 = self._realopener(pendingfilename, "w")
             fp2.write(fp1.read())
             # add pending data
             fp2.write("".join(self._delaybuf))
             fp2.close()
             # switch modes so finalize can simply rename
             self._delaybuf = None
             self._divert = True
             self.opener = _divertopener(self._realopener, self.indexfile)

         if self._divert:
             return True

         return False

     def _enforceinlinesize(self, tr, fp=None):
         if not self._delayed:
             revlog.revlog._enforceinlinesize(self, tr, fp)

     def read(self, node):
         """Obtain data from a parsed changelog revision.

         Returns a 6-tuple of:

         - manifest node in binary
         - author/user as a localstr
         - date as a 2-tuple of (time, timezone)
         - list of files
         - commit message as a localstr
         - dict of extra metadata

         Unless you need to access all fields, consider calling
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
         c = changelogrevision(self.revision(node))
         return (
             c.manifest,
             c.user,
             c.date,
             c.files,
             c.description,
             c.extra
         )

     def changelogrevision(self, nodeorrev):
         """Obtain a ``changelogrevision`` for a node or revision."""
         return changelogrevision(self.revision(nodeorrev))

     def readfiles(self, node):
         """
         short version of read that only returns the files modified by the cset
         """
         text = self.revision(node)
         if not text:
             return []
         last = text.index("\n\n")
         l = text[:last].split('\n')
         return l[3:]

     def add(self, manifest, files, desc, transaction, p1, p2,
             user, date=None, extra=None, p1copies=None, p2copies=None,
             filesadded=None, filesremoved=None):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
         # into a str object and the cached UTF-8 string is thus lost.
         user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

         user = user.strip()
         # An empty username or a username with a "\n" will make the
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
             raise error.StorageError(_("empty username"))
         if "\n" in user:
             raise error.StorageError(_("username %r contains a newline")
                                      % pycompat.bytestr(user))

         desc = stripdesc(desc)

         if date:
             parseddate = "%d %d" % dateutil.parsedate(date)
         else:
             parseddate = "%d %d" % dateutil.makedate()
         if extra:
             branch = extra.get("branch")
             if branch in ("default", ""):
                 del extra["branch"]
             elif branch in (".", "null", "tip"):
                 raise error.StorageError(_('the name \'%s\' is reserved')
                                          % branch)
-        extrasentries = p1copies, p2copies, filesadded, filesremoved
-        if extra is None and any(x is not None for x in extrasentries):
-            extra = {}
         sortedfiles = sorted(files)
         if extra is not None:
             for name in ('p1copies', 'p2copies', 'filesadded', 'filesremoved'):
                 extra.pop(name, None)
+        if self._copiesstorage == 'extra':
+            extrasentries = p1copies, p2copies, filesadded, filesremoved
+            if extra is None and any(x is not None for x in extrasentries):
+                extra = {}
         if p1copies is not None:
             extra['p1copies'] = encodecopies(sortedfiles, p1copies)
         if p2copies is not None:
             extra['p2copies'] = encodecopies(sortedfiles, p2copies)
         if filesadded is not None:
             extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
         if filesremoved is not None:
             extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)

         if extra:
             extra = encodeextra(extra)
             parseddate = "%s %s" % (parseddate, extra)
         l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
         text = "\n".join(l)
         return self.addrevision(text, transaction, len(self), p1, p2)

     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision

         This function exists because creating a changectx object
         just to access this is costly."""
         extra = self.read(rev)[5]
         return encoding.tolocal(extra.get("branch")), 'close' in extra

     def _nodeduplicatecallback(self, transaction, node):
         # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
         #
         # We track them in a list to preserve their order from the source bundle
         duplicates = transaction.changes.setdefault('revduplicates', [])
         duplicates.append(self.rev(node))
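
Behaviorally, the change above means the `copies-storage` value comes from `opener.options`, which localrepo presumably fills from repository or user configuration (the localrepo.py excerpt below is cut off before its changed lines). A hedged sketch of the new gating in `add()`, with illustrative names mirroring the diff:

```python
# Hedged sketch of the gating added to changelog.add(): the "extra" dict
# is only initialized for copy metadata when the repository was opened
# with the copies-storage option set to 'extra'.
def prepare_extra(extra, copiesstorage, p1copies=None, p2copies=None,
                  filesadded=None, filesremoved=None):
    # stale copy keys from the caller are always dropped first
    if extra is not None:
        for name in ('p1copies', 'p2copies', 'filesadded', 'filesremoved'):
            extra.pop(name, None)
    if copiesstorage == 'extra':
        entries = (p1copies, p2copies, filesadded, filesremoved)
        if extra is None and any(x is not None for x in entries):
            extra = {}
    return extra

assert prepare_extra(None, 'extra', p1copies={'a': 'b'}) == {}
assert prepare_extra(None, None, p1copies={'a': 'b'}) is None
```

As the diff stands, the later `if p1copies is not None:` assignments remain ungated, so callers are expected to pass copy arguments only when extra-based storage is intended; presumably follow-up changesets refine this.
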
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
@@ -1,3318 +1,3323 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )

 from .interfaces import (
     repository,
     util as interfaceutil,
 )

 from .utils import (
     procutil,
     stringutil,
 )

 from .revlogutils import (
     constants as revlogconst,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()

 class _basefilecache(scmutil.filecache):
91 """All filecache usage on repo are done for logic that should be unfiltered
91 """All filecache usage on repo are done for logic that should be unfiltered
92 """
92 """
93 def __get__(self, repo, type=None):
93 def __get__(self, repo, type=None):
94 if repo is None:
94 if repo is None:
95 return self
95 return self
96 # proxy to unfiltered __dict__ since filtered repo has no entry
96 # proxy to unfiltered __dict__ since filtered repo has no entry
97 unfi = repo.unfiltered()
97 unfi = repo.unfiltered()
98 try:
98 try:
99 return unfi.__dict__[self.sname]
99 return unfi.__dict__[self.sname]
100 except KeyError:
100 except KeyError:
101 pass
101 pass
102 return super(_basefilecache, self).__get__(unfi, type)
102 return super(_basefilecache, self).__get__(unfi, type)
103
103
104 def set(self, repo, value):
104 def set(self, repo, value):
105 return super(_basefilecache, self).set(repo.unfiltered(), value)
105 return super(_basefilecache, self).set(repo.unfiltered(), value)
106
106
107 class repofilecache(_basefilecache):
107 class repofilecache(_basefilecache):
108 """filecache for files in .hg but outside of .hg/store"""
108 """filecache for files in .hg but outside of .hg/store"""
109 def __init__(self, *paths):
109 def __init__(self, *paths):
110 super(repofilecache, self).__init__(*paths)
110 super(repofilecache, self).__init__(*paths)
111 for path in paths:
111 for path in paths:
112 _cachedfiles.add((path, 'plain'))
112 _cachedfiles.add((path, 'plain'))
113
113
114 def join(self, obj, fname):
114 def join(self, obj, fname):
115 return obj.vfs.join(fname)
115 return obj.vfs.join(fname)
116
116
117 class storecache(_basefilecache):
117 class storecache(_basefilecache):
118 """filecache for files in the store"""
118 """filecache for files in the store"""
119 def __init__(self, *paths):
119 def __init__(self, *paths):
120 super(storecache, self).__init__(*paths)
120 super(storecache, self).__init__(*paths)
121 for path in paths:
121 for path in paths:
122 _cachedfiles.add((path, ''))
122 _cachedfiles.add((path, ''))
123
123
124 def join(self, obj, fname):
124 def join(self, obj, fname):
125 return obj.sjoin(fname)
125 return obj.sjoin(fname)
126
126
127 class mixedrepostorecache(_basefilecache):
127 class mixedrepostorecache(_basefilecache):
128 """filecache for a mix files in .hg/store and outside"""
128 """filecache for a mix files in .hg/store and outside"""
     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == 'plain':
             return obj.vfs.join(fname)
         else:
             if location != '':
                 raise error.ProgrammingError('unexpected location: %s' %
                                              location)
             return obj.sjoin(fname)

 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True

 class unfilteredpropertycache(util.propertycache):
156 """propertycache that apply to unfiltered repo only"""
156 """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
165 """propertycache that must take filtering in account"""
165 """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
176 """decorate method that always need to be run on unfiltered version"""
176 """decorate method that always need to be run on unfiltered version"""
177 def wrapper(repo, *args, **kwargs):
177 def wrapper(repo, *args, **kwargs):
178 return orig(repo.unfiltered(), *args, **kwargs)
178 return orig(repo.unfiltered(), *args, **kwargs)
179 return wrapper
179 return wrapper
180
180
181 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
181 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
182 'unbundle'}
182 'unbundle'}
183 legacycaps = moderncaps.union({'changegroupsubset'})
183 legacycaps = moderncaps.union({'changegroupsubset'})
184
184
185 @interfaceutil.implementer(repository.ipeercommandexecutor)
185 @interfaceutil.implementer(repository.ipeercommandexecutor)
186 class localcommandexecutor(object):
186 class localcommandexecutor(object):
187 def __init__(self, peer):
187 def __init__(self, peer):
188 self._peer = peer
188 self._peer = peer
189 self._sent = False
189 self._sent = False
190 self._closed = False
190 self._closed = False
191
191
192 def __enter__(self):
192 def __enter__(self):
193 return self
193 return self
194
194
195 def __exit__(self, exctype, excvalue, exctb):
195 def __exit__(self, exctype, excvalue, exctb):
196 self.close()
196 self.close()
197
197
198 def callcommand(self, command, args):
198 def callcommand(self, command, args):
199 if self._sent:
199 if self._sent:
200 raise error.ProgrammingError('callcommand() cannot be used after '
200 raise error.ProgrammingError('callcommand() cannot be used after '
201 'sendcommands()')
201 'sendcommands()')
202
202
203 if self._closed:
203 if self._closed:
204 raise error.ProgrammingError('callcommand() cannot be used after '
204 raise error.ProgrammingError('callcommand() cannot be used after '
205 'close()')
205 'close()')
206
206
207 # We don't need to support anything fancy. Just call the named
207 # We don't need to support anything fancy. Just call the named
208 # method on the peer and return a resolved future.
208 # method on the peer and return a resolved future.
209 fn = getattr(self._peer, pycompat.sysstr(command))
209 fn = getattr(self._peer, pycompat.sysstr(command))
210
210
211 f = pycompat.futures.Future()
211 f = pycompat.futures.Future()
212
212
213 try:
213 try:
214 result = fn(**pycompat.strkwargs(args))
214 result = fn(**pycompat.strkwargs(args))
215 except Exception:
215 except Exception:
216 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
216 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
217 else:
217 else:
218 f.set_result(result)
218 f.set_result(result)
219
219
220 return f
220 return f
221
221
222 def sendcommands(self):
222 def sendcommands(self):
223 self._sent = True
223 self._sent = True
224
224
225 def close(self):
225 def close(self):
226 self._closed = True
226 self._closed = True
227
227
228 @interfaceutil.implementer(repository.ipeercommands)
228 @interfaceutil.implementer(repository.ipeercommands)
229 class localpeer(repository.peer):
229 class localpeer(repository.peer):
230 '''peer for a local repo; reflects only the most recent API'''
230 '''peer for a local repo; reflects only the most recent API'''
231
231
232 def __init__(self, repo, caps=None):
232 def __init__(self, repo, caps=None):
233 super(localpeer, self).__init__()
233 super(localpeer, self).__init__()
234
234
235 if caps is None:
235 if caps is None:
236 caps = moderncaps.copy()
236 caps = moderncaps.copy()
237 self._repo = repo.filtered('served')
237 self._repo = repo.filtered('served')
238 self.ui = repo.ui
238 self.ui = repo.ui
239 self._caps = repo._restrictcapabilities(caps)
239 self._caps = repo._restrictcapabilities(caps)
240
240
241 # Begin of _basepeer interface.
241 # Begin of _basepeer interface.
242
242
243 def url(self):
    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

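# A minimal sketch of how an extension can hook into ``featuresetupfuncs``
# (hypothetical extension code; b'exp-myfeature' is a made-up requirement):
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
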
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for
    # requirements compatibility in the pointed-to repo. This entails loading
    # the .hg/hgrc in that repo, as that repo may load extensions needed to
    # open it. This is a bit complicated because we don't want the other hgrc
    # to overwrite settings in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we will
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

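# A minimal sketch of the monkeypatching described in the docstring above
# (hypothetical extension code; the extra file name is made up):
#
#   def myloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', myloadhgrc)
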
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

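# Illustration (hedged; exact contents depend on the build and loaded
# extensions):
#
#   supported = gathersupportedrequirements(ui)
#   # always includes the static entries, e.g.:
#   assert b'revlogv1' in supported
#   # with a working zstd engine, the loop above also derives
#   # b'exp-compression-zstd' and b'revlog-compression-zstd'.
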
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns ``None`` on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

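# Usage sketch (hypothetical requirement names):
#
#   ensurerequirementsrecognized({b'store', b'futurefeature'},
#                                {b'store', b'fncache', b'dotencode'})
#   # -> raises error.RequirementError mentioning b'futurefeature'
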
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

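# For orientation (hedged summary of the branches above): a repository
# created by a modern client usually has {b'store', b'fncache', b'dotencode'}
# in its requirements and therefore gets a dotencode-enabled fncachestore:
#
#   store = makestore({b'store', b'fncache', b'dotencode'}, storebasepath,
#                     lambda base: vfsmod.vfs(base, cacheaudited=True))
#   # -> storemod.fncachestore
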
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else: # explicitly mark repo as using revlogv0
        options['revlogv0'] = True

    writecopiesto = ui.config('experimental', 'copies.write-to')
    copiesextramode = ('changeset-only', 'compatibility')
    if writecopiesto in copiesextramode:
        options['copies-storage'] = 'extra'

    return options

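# Hgrc illustration for the copies logic just above (hedged: the
# ``experimental.copies.write-to`` knob and the two mode values are what the
# code reads; the effect description is an assumption based on the option
# name):
#
#   [experimental]
#   copies.write-to = changeset-only
#
# With 'changeset-only' or 'compatibility' configured, the storage layer is
# told via ``options['copies-storage'] = 'extra'`` to keep copy metadata in
# the changeset extras.
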
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

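# Example hgrc (hedged; the config names are the ones read above, the values
# are illustrative only):
#
#   [storage]
#   revlog.optimize-delta-parent-choice = yes
#   revlog.zstd.level = 10
#
#   [format]
#   maxchainlen = 5000
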
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

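# Because the lambdas above resolve ``makemain``/``makefilestorage`` at call
# time, module-level wrapping works. A minimal sketch of extension code doing
# that (hypothetical names; 'wrapmakemain' is made up):
#
#   def wrapmakemain(orig, **kwargs):
#       cls = orig(**kwargs)
#       # derive and return a subclass carrying extra behavior
#       return type(cls.__name__, (cls,), {})
#
#   extensions.wrapfunction(localrepo, 'makemain', wrapmakemain)
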
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

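    # A hedged usage sketch of the preferred entry point named in the
    # docstring (caller-side code, not part of this class; the path is an
    # example):
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
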
    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

1113 def _getsvfsward(self, origfunc):
1118 def _getsvfsward(self, origfunc):
1114 """build a ward for self.svfs"""
1119 """build a ward for self.svfs"""
1115 rref = weakref.ref(self)
1120 rref = weakref.ref(self)
1116 def checksvfs(path, mode=None):
1121 def checksvfs(path, mode=None):
1117 ret = origfunc(path, mode=mode)
1122 ret = origfunc(path, mode=mode)
1118 repo = rref()
1123 repo = rref()
1119 if repo is None or not util.safehasattr(repo, '_lockref'):
1124 if repo is None or not util.safehasattr(repo, '_lockref'):
1120 return
1125 return
1121 if mode in (None, 'r', 'rb'):
1126 if mode in (None, 'r', 'rb'):
1122 return
1127 return
1123 if path.startswith(repo.sharedpath):
1128 if path.startswith(repo.sharedpath):
1124 # truncate name relative to the repository (.hg)
1129 # truncate name relative to the repository (.hg)
1125 path = path[len(repo.sharedpath) + 1:]
1130 path = path[len(repo.sharedpath) + 1:]
1126 if repo._currentlock(repo._lockref) is None:
1131 if repo._currentlock(repo._lockref) is None:
1127 repo.ui.develwarn('write with no lock: "%s"' % path,
1132 repo.ui.develwarn('write with no lock: "%s"' % path,
1128 stacklevel=4)
1133 stacklevel=4)
1129 return ret
1134 return ret
1130 return checksvfs
1135 return checksvfs
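
    # A minimal standalone sketch (illustrative, not part of this module) of
    # the ward pattern used by the two methods above: wrap an existing audit
    # callable, always re-dispatch to it, and report writes that happen while
    # no lock is held.
    #
    #     def makeward(origfunc, onlocklesswrite):
    #         def ward(path, mode=None):
    #             ret = origfunc(path, mode=mode)  # keep original behavior
    #             if mode not in (None, 'r', 'rb'):
    #                 onlocklesswrite(path)  # e.g. ui.develwarn(...)
    #             return ret
    #         return ward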

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
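
    # How the walk above resolves (illustrative): for a subpath of
    # 'sub/deep/x.txt' it tries the prefixes 'sub/deep/x.txt', 'sub/deep'
    # and 'sub' in turn; the first prefix found in ctx.substate either
    # matches the normalized subpath exactly, or the check recurses into
    # that subrepo with the remainder of the path.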

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and '%' not in name:
            name = name + '%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

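    # Example (illustrative): views never stack, so both calls below yield a
    # repoview over the unfiltered repository, not a view of a view.
    #
    #     served = repo.filtered('served')
    #     visible = served.filtered('visible')  # one level of filtering only
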
    @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
                         ('bookmarks', ''), ('00changelog.i', ''))
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. This invalidation is
        # "light": the `filecache` mechanism keeps the data in memory and will
        # reuse it if the underlying files did not change. Not parsing the
        # same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if ('changelog' in vars(self) and self.currenttransaction() is None):
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
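
    # Illustrative behavior (assuming a narrowspec that includes only src/);
    # matchers are callable on repo-relative paths:
    #
    #     m = repo.narrowmatch()
    #     m('src/a.c')     # -> True, inside the narrowspec
    #     m('docs/b.txt')  # -> False, outside the narrowspec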

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
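
    # Example lookups (illustrative), following the branches above:
    #
    #     repo[0]        # changectx from a revision number
    #     repo['tip']    # from a symbolic name ('null', 'tip', '.')
    #     repo[node]     # from a 20-byte binary or 40-char hex node
    #     repo[None]     # the working directory context
    #     repo[0:5]      # list of changectx, filtered revisions skipped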

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
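
    # Example (illustrative): %-formatting escapes arguments safely instead
    # of interpolating them into the revset source.
    #
    #     repo.revs('ancestors(%d)', 42)        # %d: integer revision
    #     repo.revs('branch(%s)', 'default')    # %s: string
    #     repo.revs('%ld and draft()', [1, 2])  # %ld: list of integers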

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
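
    # Example (illustrative): expand user aliases while overriding one of
    # them with a local definition.
    #
    #     repo.anyrevs(['mine()'], user=True,
    #                  localalias={'mine': 'draft() and user("alice")'})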

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
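
    # Example (illustrative): fire a custom hook; for shell hooks the extra
    # keyword arguments are exposed as HG_* environment variables.
    #
    #     repo.hook('myext-finished', throw=False, result='ok')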

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
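
    # Example hgrc (illustrative) consumed by _loadfilter: each section maps
    # a file pattern to a shell command (run via procutil.filter) or to a
    # filter registered with adddatafilter().
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip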

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
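
    # Typical usage (illustrative): a transaction must be opened under the
    # store lock; journaled writes are rolled back if the block fails.
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # journaled writes happen here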
1821
1826
1822 def transaction(self, desc, report=None):
1827 def transaction(self, desc, report=None):
1823 if (self.ui.configbool('devel', 'all-warnings')
1828 if (self.ui.configbool('devel', 'all-warnings')
1824 or self.ui.configbool('devel', 'check-locks')):
1829 or self.ui.configbool('devel', 'check-locks')):
1825 if self._currentlock(self._lockref) is None:
1830 if self._currentlock(self._lockref) is None:
1826 raise error.ProgrammingError('transaction requires locking')
1831 raise error.ProgrammingError('transaction requires locking')
1827 tr = self.currenttransaction()
1832 tr = self.currenttransaction()
1828 if tr is not None:
1833 if tr is not None:
1829 return tr.nest(name=desc)
1834 return tr.nest(name=desc)
1830
1835
1831 # abort here if the journal already exists
1836 # abort here if the journal already exists
1832 if self.svfs.exists("journal"):
1837 if self.svfs.exists("journal"):
1833 raise error.RepoError(
1838 raise error.RepoError(
1834 _("abandoned transaction found"),
1839 _("abandoned transaction found"),
1835 hint=_("run 'hg recover' to clean up transaction"))
1840 hint=_("run 'hg recover' to clean up transaction"))
1836
1841
1837 idbase = "%.40f#%f" % (random.random(), time.time())
1842 idbase = "%.40f#%f" % (random.random(), time.time())
1838 ha = hex(hashlib.sha1(idbase).digest())
1843 ha = hex(hashlib.sha1(idbase).digest())
1839 txnid = 'TXN:' + ha
1844 txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without any
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
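        #
        # As an illustrative sketch only (not part of any Mercurial API, and
        # the helper name is hypothetical), a hook could consume that file as:
        #
        #   def parsetagchanges(fp):
        #       # yield (action, hex-node, tag-name) tuples, one per line
        #       for line in fp:
        #           action, hexnode, tagname = line.rstrip('\n').split(' ', 2)
        #           yield action, hexnode, tagname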
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # once the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # to be available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            r = repo.ui.configsuboptions('experimental',
                                         'single-head-per-branch')
            singlehead, singleheadsub = r
            if singlehead:
                accountclosed = singleheadsub.get("account-closed-heads", False)
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
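        # undoname() maps each journal file to the backup that _rollback()
        # below consumes, e.g. 'journal.dirstate' -> 'undo.dirstate'.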

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
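        # journal.desc thus records the pre-transaction changeset count and
        # the transaction name; e.g. for a 42-changeset repository opening a
        # 'commit' transaction, the file contains:
        #
        #   42
        #   commit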
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
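        # (For instance, `hg debugupdatecaches` is a caller that asks for
        # full=True.)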
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
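        # For example, commit() below registers its 'commit' hook via
        # self._afterlock(commithook) so that the hook only fires once both
        # 'wlock' and 'lock' have been released.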
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
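        # Typical caller pattern (illustrative sketch, mirroring what commit()
        # below does):
        #
        #   with repo.wlock(), repo.lock():
        #       with repo.transaction('my-change'):
        #           ...  # mutate the store while holding both locks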
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
                    includecopymeta):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if ((fparent1 != nullid and
                     manifest1.flags(fname) != fctx.flags()) or
                    (fparent2 != nullid and
                     manifest2.flags(fname) != fctx.flags())):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
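            # For illustration, committing a rename of foo to bar (with
            # includecopymeta true) records filelog metadata shaped like:
            #
            #   meta = {'copy': 'foo',
            #           'copyrev': '<hex filelog node of the copy source>'}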
2474
2479
2475 cnode = manifest1.get(cfname)
2480 cnode = manifest1.get(cfname)
2476 newfparent = fparent2
2481 newfparent = fparent2
2477
2482
2478 if manifest2: # branch merge
2483 if manifest2: # branch merge
2479 if fparent2 == nullid or cnode is None: # copied on remote side
2484 if fparent2 == nullid or cnode is None: # copied on remote side
2480 if cfname in manifest2:
2485 if cfname in manifest2:
2481 cnode = manifest2[cfname]
2486 cnode = manifest2[cfname]
2482 newfparent = fparent1
2487 newfparent = fparent1
2483
2488
2484 # Here, we used to search backwards through history to try to find
2489 # Here, we used to search backwards through history to try to find
2485 # where the file copy came from if the source of a copy was not in
2490 # where the file copy came from if the source of a copy was not in
2486 # the parent directory. However, this doesn't actually make sense to
2491 # the parent directory. However, this doesn't actually make sense to
2487 # do (what does a copy from something not in your working copy even
2492 # do (what does a copy from something not in your working copy even
2488 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2493 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2489 # the user that copy information was dropped, so if they didn't
2494 # the user that copy information was dropped, so if they didn't
2490 # expect this outcome it can be fixed, but this is the correct
2495 # expect this outcome it can be fixed, but this is the correct
2491 # behavior in this circumstance.
2496 # behavior in this circumstance.
2492
2497
2493 if cnode:
2498 if cnode:
2494 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2499 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2495 if includecopymeta:
2500 if includecopymeta:
2496 meta["copy"] = cfname
2501 meta["copy"] = cfname
2497 meta["copyrev"] = hex(cnode)
2502 meta["copyrev"] = hex(cnode)
2498 fparent1, fparent2 = nullid, newfparent
2503 fparent1, fparent2 = nullid, newfparent
2499 else:
2504 else:
2500 self.ui.warn(_("warning: can't find ancestor for '%s' "
2505 self.ui.warn(_("warning: can't find ancestor for '%s' "
2501 "copied from '%s'!\n") % (fname, cfname))
2506 "copied from '%s'!\n") % (fname, cfname))
2502
2507
2503 elif fparent1 == nullid:
2508 elif fparent1 == nullid:
2504 fparent1, fparent2 = fparent2, nullid
2509 fparent1, fparent2 = fparent2, nullid
2505 elif fparent2 != nullid:
2510 elif fparent2 != nullid:
2506 # is one parent an ancestor of the other?
2511 # is one parent an ancestor of the other?
2507 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2512 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2508 if fparent1 in fparentancestors:
2513 if fparent1 in fparentancestors:
2509 fparent1, fparent2 = fparent2, nullid
2514 fparent1, fparent2 = fparent2, nullid
2510 elif fparent2 in fparentancestors:
2515 elif fparent2 in fparentancestors:
2511 fparent2 = nullid
2516 fparent2 = nullid
2512
2517
2513 # is the file changed?
2518 # is the file changed?
2514 text = fctx.data()
2519 text = fctx.data()
2515 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2520 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2516 changelist.append(fname)
2521 changelist.append(fname)
2517 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2522 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2518 # are just the flags changed during merge?
2523 # are just the flags changed during merge?
2519 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2524 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2520 changelist.append(fname)
2525 changelist.append(fname)
2521
2526
2522 return fparent1
2527 return fparent1
2523
2528
2524 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2529 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2525 """check for commit arguments that aren't committable"""
2530 """check for commit arguments that aren't committable"""
2526 if match.isexact() or match.prefix():
2531 if match.isexact() or match.prefix():
2527 matched = set(status.modified + status.added + status.removed)
2532 matched = set(status.modified + status.added + status.removed)
2528
2533
2529 for f in match.files():
2534 for f in match.files():
2530 f = self.dirstate.normalize(f)
2535 f = self.dirstate.normalize(f)
2531 if f == '.' or f in matched or f in wctx.substate:
2536 if f == '.' or f in matched or f in wctx.substate:
2532 continue
2537 continue
2533 if f in status.deleted:
2538 if f in status.deleted:
2534 fail(f, _('file not found!'))
2539 fail(f, _('file not found!'))
2535 if f in vdirs: # visited directory
2540 if f in vdirs: # visited directory
2536 d = f + '/'
2541 d = f + '/'
2537 for mf in matched:
2542 for mf in matched:
2538 if mf.startswith(d):
2543 if mf.startswith(d):
2539 break
2544 break
2540 else:
2545 else:
2541 fail(f, _("no match under directory!"))
2546 fail(f, _("no match under directory!"))
2542 elif f not in self.dirstate:
2547 elif f not in self.dirstate:
2543 fail(f, _("file not tracked!"))
2548 fail(f, _("file not tracked!"))
2544
2549
2545 @unfilteredmethod
2550 @unfilteredmethod
2546 def commit(self, text="", user=None, date=None, match=None, force=False,
2551 def commit(self, text="", user=None, date=None, match=None, force=False,
2547 editor=False, extra=None):
2552 editor=False, extra=None):
2548 """Add a new revision to current repository.
2553 """Add a new revision to current repository.
2549
2554
2550 Revision information is gathered from the working directory,
2555 Revision information is gathered from the working directory,
2551 match can be used to filter the committed files. If editor is
2556 match can be used to filter the committed files. If editor is
2552 supplied, it is called to get a commit message.
2557 supplied, it is called to get a commit message.
2553 """
2558 """
2554 if extra is None:
2559 if extra is None:
2555 extra = {}
2560 extra = {}
2556
2561
2557 def fail(f, msg):
2562 def fail(f, msg):
2558 raise error.Abort('%s: %s' % (f, msg))
2563 raise error.Abort('%s: %s' % (f, msg))
2559
2564
2560 if not match:
2565 if not match:
2561 match = matchmod.always()
2566 match = matchmod.always()
2562
2567
2563 if not force:
2568 if not force:
2564 vdirs = []
2569 vdirs = []
2565 match.explicitdir = vdirs.append
2570 match.explicitdir = vdirs.append
2566 match.bad = fail
2571 match.bad = fail
2567
2572
2568 # lock() for recent changelog (see issue4368)
2573 # lock() for recent changelog (see issue4368)
2569 with self.wlock(), self.lock():
2574 with self.wlock(), self.lock():
2570 wctx = self[None]
2575 wctx = self[None]
2571 merge = len(wctx.parents()) > 1
2576 merge = len(wctx.parents()) > 1
2572
2577
2573 if not force and merge and not match.always():
2578 if not force and merge and not match.always():
2574 raise error.Abort(_('cannot partially commit a merge '
2579 raise error.Abort(_('cannot partially commit a merge '
2575 '(do not specify files or patterns)'))
2580 '(do not specify files or patterns)'))
2576
2581
2577 status = self.status(match=match, clean=force)
2582 status = self.status(match=match, clean=force)
2578 if force:
2583 if force:
2579 status.modified.extend(status.clean) # mq may commit clean files
2584 status.modified.extend(status.clean) # mq may commit clean files
2580
2585
2581 # check subrepos
2586 # check subrepos
2582 subs, commitsubs, newstate = subrepoutil.precommit(
2587 subs, commitsubs, newstate = subrepoutil.precommit(
2583 self.ui, wctx, status, match, force=force)
2588 self.ui, wctx, status, match, force=force)
2584
2589
2585 # make sure all explicit patterns are matched
2590 # make sure all explicit patterns are matched
2586 if not force:
2591 if not force:
2587 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2592 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2588
2593
2589 cctx = context.workingcommitctx(self, status,
2594 cctx = context.workingcommitctx(self, status,
2590 text, user, date, extra)
2595 text, user, date, extra)
2591
2596
2592 # internal config: ui.allowemptycommit
2597 # internal config: ui.allowemptycommit
2593 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2598 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2594 or extra.get('close') or merge or cctx.files()
2599 or extra.get('close') or merge or cctx.files()
2595 or self.ui.configbool('ui', 'allowemptycommit'))
2600 or self.ui.configbool('ui', 'allowemptycommit'))
2596 if not allowemptycommit:
2601 if not allowemptycommit:
2597 return None
2602 return None
2598
2603
2599 if merge and cctx.deleted():
2604 if merge and cctx.deleted():
2600 raise error.Abort(_("cannot commit merge with missing files"))
2605 raise error.Abort(_("cannot commit merge with missing files"))
2601
2606
2602 ms = mergemod.mergestate.read(self)
2607 ms = mergemod.mergestate.read(self)
2603 mergeutil.checkunresolved(ms)
2608 mergeutil.checkunresolved(ms)
2604
2609
2605 if editor:
2610 if editor:
2606 cctx._text = editor(self, cctx, subs)
2611 cctx._text = editor(self, cctx, subs)
2607 edited = (text != cctx._text)
2612 edited = (text != cctx._text)
2608
2613
2609 # Save commit message in case this transaction gets rolled back
2614 # Save commit message in case this transaction gets rolled back
2610 # (e.g. by a pretxncommit hook). Leave the content alone on
2615 # (e.g. by a pretxncommit hook). Leave the content alone on
2611 # the assumption that the user will use the same editor again.
2616 # the assumption that the user will use the same editor again.
2612 msgfn = self.savecommitmessage(cctx._text)
2617 msgfn = self.savecommitmessage(cctx._text)
2613
2618
2614 # commit subs and write new state
2619 # commit subs and write new state
2615 if subs:
2620 if subs:
2616 uipathfn = scmutil.getuipathfn(self)
2621 uipathfn = scmutil.getuipathfn(self)
2617 for s in sorted(commitsubs):
2622 for s in sorted(commitsubs):
2618 sub = wctx.sub(s)
2623 sub = wctx.sub(s)
2619 self.ui.status(_('committing subrepository %s\n') %
2624 self.ui.status(_('committing subrepository %s\n') %
2620 uipathfn(subrepoutil.subrelpath(sub)))
2625 uipathfn(subrepoutil.subrelpath(sub)))
2621 sr = sub.commit(cctx._text, user, date)
2626 sr = sub.commit(cctx._text, user, date)
2622 newstate[s] = (newstate[s][0], sr)
2627 newstate[s] = (newstate[s][0], sr)
2623 subrepoutil.writestate(self, newstate)
2628 subrepoutil.writestate(self, newstate)
2624
2629
2625 p1, p2 = self.dirstate.parents()
2630 p1, p2 = self.dirstate.parents()
2626 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2631 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2627 try:
2632 try:
2628 self.hook("precommit", throw=True, parent1=hookp1,
2633 self.hook("precommit", throw=True, parent1=hookp1,
2629 parent2=hookp2)
2634 parent2=hookp2)
2630 with self.transaction('commit'):
2635 with self.transaction('commit'):
2631 ret = self.commitctx(cctx, True)
2636 ret = self.commitctx(cctx, True)
2632 # update bookmarks, dirstate and mergestate
2637 # update bookmarks, dirstate and mergestate
2633 bookmarks.update(self, [p1, p2], ret)
2638 bookmarks.update(self, [p1, p2], ret)
2634 cctx.markcommitted(ret)
2639 cctx.markcommitted(ret)
2635 ms.reset()
2640 ms.reset()
2636 except: # re-raises
2641 except: # re-raises
2637 if edited:
2642 if edited:
2638 self.ui.write(
2643 self.ui.write(
2639 _('note: commit message saved in %s\n') % msgfn)
2644 _('note: commit message saved in %s\n') % msgfn)
2640 raise
2645 raise
2641
2646
        def commithook():
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook fires
            if self.changelog.hasnode(ret):
                self.hook("commit", node=hex(ret), parent1=hookp1,
                          parent2=hookp2)
        self._afterlock(commithook)
        return ret

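    # Editor's illustration (not part of the original source): the "commit"
    # hook fired by commithook() above is the one users configure in hgrc,
    # with the new changeset id exported as $HG_NODE, e.g.:
    #
    #   [hooks]
    #   commit = echo "committed $HG_NODE"
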
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config('experimental', 'copies.write-to')
        writefilecopymeta = writecopiesto != 'changeset-only'
        writechangesetcopy = (writecopiesto in
                              ('changeset-only', 'compatibility'))
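        # Editor's summary (inferred from the values tested above; the
        # default is believed to be 'filelog-only'): 'filelog-only' stores
        # copy metadata only in filelogs, 'changeset-only' only in changeset
        # extras, and 'compatibility' writes it to both places.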
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed,
                                                    writefilecopymeta)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:
                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]
                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge,
                        # this function determines whether the absence is due
                        # to a deletion from a parent, or whether the merge
                        # commit itself deletes the file. We decide this by
                        # doing a simplified three-way merge of the manifest
                        # entry for the file. There are two ways we decide the
                        # merge itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In
                        #   other words, that parent left the file unchanged
                        #   while the other one deleted it.
                        # One way to think about this is that deleting a file
                        # is similar to emptying it, so the list of changed
                        # files should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return (f not in m2
                                    and all(f in ma and ma.find(f) == m1.find(f)
                                            for ma in mas()))
                        elif f in m2:
                            return all(f in ma and ma.find(f) == m2.find(f)
                                       for ma in mas())
                        else:
                            return True
                    removed = [f for f in removed if not deletionfromparent(f)]

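                # Worked example (editor's addition): in a merge where p2
                # deleted README and p1 left it untouched since the common
                # ancestor, deletionfromparent('README') is True, so the
                # merge changeset does not list README among its files.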
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())

                    if writechangesetcopy:
                        filesadded = [f for f in changed
                                      if not (f in m1 or f in m2)]
                        filesremoved = removed
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == 'changeset-only':
                # If writing only to changeset extras, use None to indicate
                # that no entry should be written. If writing to both, write
                # an empty entry to prevent the reader from falling back to
                # reading filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy(),
                                   p1copies, p2copies, filesadded, filesremoved)
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent already has a higher phase, the
                # result will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
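                # Editor's note: targetphase comes from
                # subrepoutil.newcommitphase() and is an integer phase
                # (phases.public == 0, draft == 1, secret == 2); a public
                # target (0) needs no boundary retraction.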
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock) or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
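
    # Behavior sketch (editor's note): for each (top, bottom) pair, between()
    # collects first-parent ancestors of top sampled at exponentially growing
    # distances 1, 2, 4, 8, ... until the walk reaches bottom (or nullid).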

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote, and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
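
    # Editor's note: pushkey namespaces include, for example, 'bookmarks'
    # and 'phases'; the pushkey module keeps the full registry.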

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
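
# Editor's illustration: undoname('.hg/store/journal') returns
# '.hg/store/undo'.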

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == 'zstd':
        requirements.add('revlog-compression-zstd')
    elif compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)
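    # Editor's illustration: with the hgrc setting
    #
    #   [format]
    #   revlog-compression = zstd
    #
    # a freshly created repository gains the 'revlog-compression-zstd'
    # requirement, while the zlib default adds no extra requirement.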

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    if ui.configbool('format', 'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
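
# Editor's note: after poisonrepository(repo), any attribute access on the
# old reference except close() raises ProgrammingError, which quickly
# surfaces stale repo references (e.g. after "hg unshare").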