changelog: optionally store added and removed files in changeset extras...
Martin von Zweigbergk
r42598:f385ba70 default
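The new 'filesadded'/'filesremoved' extras entries do not store file names; they store '\0'-separated indices into the changeset's sorted files list. A minimal sketch of the encoding, with illustrative values only (the helper mirrors the encodefileindices function added in the diff below):

files = sorted(['COPYING', 'README', 'setup.py'])
filesadded = {'COPYING', 'setup.py'}   # subset of files that were newly added

def encodefileindices(files, subset):
    # same logic as the helper added to changelog.py below: record the
    # positions of the subset within the sorted file list, '\0'-separated
    subset = set(subset)
    indices = []
    for i, f in enumerate(files):
        if f in subset:
            indices.append('%d' % i)
    return '\0'.join(indices)

assert encodefileindices(files, filesadded) == '0\x002'
# the changeset extras would then carry {'filesadded': '0\x002'}

Storing indices rather than repeating the names keeps the extras entries compact, since the file names already appear in the changeset's files list.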
@@ -1,623 +1,638 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
 )
 from .thirdparty import (
     attr,
 )

 from . import (
     encoding,
     error,
     pycompat,
     revlog,
     util,
 )
 from .utils import (
     dateutil,
     stringutil,
 )

 _defaultextra = {'branch': 'default'}

 def _string_escape(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
     >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
     >>> s
     'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
     >>> s == _string_unescape(res)
     True
     """
     # subset of the string_escape codec
     text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
     return text.replace('\0', '\\0')

 def _string_unescape(text):
     if '\\0' in text:
         # fix up \0 without getting into trouble with \\0
         text = text.replace('\\\\', '\\\\\n')
         text = text.replace('\\0', '\0')
         text = text.replace('\n', '')
     return stringutil.unescapestr(text)

 def decodeextra(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
     ...                                 b'baz': chr(92) + chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
     for l in text.split('\0'):
         if l:
             k, v = _string_unescape(l).split(':', 1)
             extra[k] = v
     return extra

 def encodeextra(d):
     # keys must be sorted to produce a deterministic changelog entry
     items = [
         _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
         for k in sorted(d)
     ]
     return "\0".join(items)

 def encodecopies(copies):
     items = [
         '%s\0%s' % (k, copies[k])
         for k in sorted(copies)
     ]
     return "\n".join(items)

 def decodecopies(data):
     try:
         copies = {}
         for l in data.split('\n'):
             k, v = l.split('\0')
             copies[k] = v
         return copies
     except ValueError:
         # Perhaps someone had chosen the same key name (e.g. "p1copies") and
         # used different syntax for the value.
         return None

+def encodefileindices(files, subset):
+    subset = set(subset)
+    indices = []
+    for i, f in enumerate(files):
+        if f in subset:
+            indices.append('%d' % i)
+    return '\0'.join(indices)
+
 def stripdesc(desc):
     """strip trailing whitespace and leading and trailing empty lines"""
     return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')

 class appender(object):
     '''the changelog index must be updated last on disk, so we use this class
     to delay writes to it'''
     def __init__(self, vfs, name, mode, buf):
         self.data = buf
         fp = vfs(name, mode)
         self.fp = fp
         self.offset = fp.tell()
         self.size = vfs.fstat(fp).st_size
         self._end = self.size

     def end(self):
         return self._end
     def tell(self):
         return self.offset
     def flush(self):
         pass

     @property
     def closed(self):
         return self.fp.closed

     def close(self):
         self.fp.close()

     def seek(self, offset, whence=0):
         '''virtual file offset spans real file and data'''
         if whence == 0:
             self.offset = offset
         elif whence == 1:
             self.offset += offset
         elif whence == 2:
             self.offset = self.end() + offset
         if self.offset < self.size:
             self.fp.seek(self.offset)

     def read(self, count=-1):
         '''only trick here is reads that span real file and data'''
         ret = ""
         if self.offset < self.size:
             s = self.fp.read(count)
             ret = s
             self.offset += len(s)
             if count > 0:
                 count -= len(s)
         if count != 0:
             doff = self.offset - self.size
             self.data.insert(0, "".join(self.data))
             del self.data[1:]
             s = self.data[0][doff:doff + count]
             self.offset += len(s)
             ret += s
         return ret

     def write(self, s):
         self.data.append(bytes(s))
         self.offset += len(s)
         self._end += len(s)

     def __enter__(self):
         self.fp.__enter__()
         return self

     def __exit__(self, *args):
         return self.fp.__exit__(*args)

 def _divertopener(opener, target):
     """build an opener that writes in 'target.a' instead of 'target'"""
     def _divert(name, mode='r', checkambig=False):
         if name != target:
             return opener(name, mode)
         return opener(name + ".a", mode)
     return _divert

 def _delayopener(opener, target, buf):
     """build an opener that stores chunks in 'buf' instead of 'target'"""
     def _delay(name, mode='r', checkambig=False):
         if name != target:
             return opener(name, mode)
         return appender(opener, name, mode, buf)
     return _delay

 @attr.s
 class _changelogrevision(object):
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
     manifest = attr.ib(default=nullid)
     user = attr.ib(default='')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
     description = attr.ib(default='')

 class changelogrevision(object):
     """Holds results of a parsed changelog revision.

     Changelog revisions consist of multiple pieces of data, including
     the manifest node, user, and date. This object exposes a view into
     the parsed object.
     """

     __slots__ = (
         r'_offsets',
         r'_text',
     )

     def __new__(cls, text):
         if not text:
             return _changelogrevision(extra=_defaultextra)

         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
         # But doing it here is equivalent and saves an extra function call.

         # format used:
         # nodeid\n        : manifest node in ascii
         # user\n          : user, no \n or \r allowed
         # time tz extra\n : date (time is int or float, timezone is int)
         #                 : extra is metadata, encoded and separated by '\0'
         #                 : older versions ignore it
         # files\n\n       : files modified by the cset, no \n or \r allowed
         # (.*)            : comment (free text, ideally utf-8)
         #
         # changelog v0 doesn't use extra

         nl1 = text.index('\n')
         nl2 = text.index('\n', nl1 + 1)
         nl3 = text.index('\n', nl2 + 1)

         # The list of files may be empty. Which means nl3 is the first of the
         # double newline that precedes the description.
         if text[nl3 + 1:nl3 + 2] == '\n':
             doublenl = nl3
         else:
             doublenl = text.index('\n\n', nl3 + 1)

         self._offsets = (nl1, nl2, nl3, doublenl)
         self._text = text

         return self

     @property
     def manifest(self):
         return bin(self._text[0:self._offsets[0]])

     @property
     def user(self):
         off = self._offsets
         return encoding.tolocal(self._text[off[0] + 1:off[1]])

     @property
     def _rawdate(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1:off[2]]
         return dateextra.split(' ', 2)[0:2]

     @property
     def _rawextra(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1:off[2]]
         fields = dateextra.split(' ', 2)
         if len(fields) != 3:
             return None

         return fields[2]

     @property
     def date(self):
         raw = self._rawdate
         time = float(raw[0])
         # Various tools did silly things with the timezone.
         try:
             timezone = int(raw[1])
         except ValueError:
             timezone = 0

         return time, timezone

     @property
     def extra(self):
         raw = self._rawextra
         if raw is None:
             return _defaultextra

         return decodeextra(raw)

     @property
     def files(self):
         off = self._offsets
         if off[2] == off[3]:
             return []

         return self._text[off[2] + 1:off[3]].split('\n')

     @property
     def p1copies(self):
         rawcopies = self.extra.get('p1copies')
         return rawcopies and decodecopies(rawcopies)

     @property
     def p2copies(self):
         rawcopies = self.extra.get('p2copies')
         return rawcopies and decodecopies(rawcopies)

     @property
     def description(self):
         return encoding.tolocal(self._text[self._offsets[3] + 2:])

 class changelog(revlog.revlog):
     def __init__(self, opener, trypending=False):
         """Load a changelog revlog using an opener.

         If ``trypending`` is true, we attempt to load the index from a
         ``00changelog.i.a`` file instead of the default ``00changelog.i``.
         The ``00changelog.i.a`` file contains index (and possibly inline
         revision) data for a transaction that hasn't been finalized yet.
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.
         """
         if trypending and opener.exists('00changelog.i.a'):
             indexfile = '00changelog.i.a'
         else:
             indexfile = '00changelog.i'

         datafile = '00changelog.d'
         revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
                                checkambig=True, mmaplargeindex=True)

         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.

             self.version &= ~revlog.FLAG_GENERALDELTA
             self._generaldelta = False

         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each other. So disable
         # delta chains.
         self._storedeltachains = False

         self._realopener = opener
         self._delayed = False
         self._delaybuf = None
         self._divert = False
         self.filteredrevs = frozenset()

     def tiprev(self):
         for i in pycompat.xrange(len(self) -1, -2, -1):
             if i not in self.filteredrevs:
                 return i

     def tip(self):
         """filtered version of revlog.tip"""
         return self.node(self.tiprev())

     def __contains__(self, rev):
         """filtered version of revlog.__contains__"""
         return (0 <= rev < len(self)
                 and rev not in self.filteredrevs)

     def __iter__(self):
         """filtered version of revlog.__iter__"""
         if len(self.filteredrevs) == 0:
             return revlog.revlog.__iter__(self)

         def filterediter():
             for i in pycompat.xrange(len(self)):
                 if i not in self.filteredrevs:
                     yield i

         return filterediter()

     def revs(self, start=0, stop=None):
         """filtered version of revlog.revs"""
         for i in super(changelog, self).revs(start, stop):
             if i not in self.filteredrevs:
                 yield i

     def reachableroots(self, minroot, heads, roots, includepath=False):
         return self.index.reachableroots2(minroot, heads, roots, includepath)

     def _checknofilteredinrevs(self, revs):
         """raise the appropriate error if 'revs' contains a filtered revision

         This returns a version of 'revs' to be used thereafter by the caller.
         In particular, if revs is an iterator, it is converted into a set.
         """
         safehasattr = util.safehasattr
         if safehasattr(revs, '__next__'):
             # Note that inspect.isgenerator() is not true for iterators,
             revs = set(revs)

         filteredrevs = self.filteredrevs
         if safehasattr(revs, 'first'):  # smartset
             offenders = revs & filteredrevs
         else:
             offenders = filteredrevs.intersection(revs)

         for rev in offenders:
             raise error.FilteredIndexError(rev)
         return revs

     def headrevs(self, revs=None):
         if revs is None and self.filteredrevs:
             try:
                 return self.index.headrevsfiltered(self.filteredrevs)
             # AttributeError covers non-c-extension environments and
             # old c extensions without filter handling.
             except AttributeError:
                 return self._headrevs()

         if self.filteredrevs:
             revs = self._checknofilteredinrevs(revs)
         return super(changelog, self).headrevs(revs)

     def strip(self, *args, **kwargs):
         # XXX make something better than assert
         # We can't expect proper strip behavior if we are filtered.
         assert not self.filteredrevs
         super(changelog, self).strip(*args, **kwargs)

     def rev(self, node):
         """filtered version of revlog.rev"""
         r = super(changelog, self).rev(node)
         if r in self.filteredrevs:
             raise error.FilteredLookupError(hex(node), self.indexfile,
                                             _('filtered node'))
         return r

     def node(self, rev):
         """filtered version of revlog.node"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).node(rev)

     def linkrev(self, rev):
         """filtered version of revlog.linkrev"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).linkrev(rev)

     def parentrevs(self, rev):
         """filtered version of revlog.parentrevs"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).parentrevs(rev)

     def flags(self, rev):
         """filtered version of revlog.flags"""
         if rev in self.filteredrevs:
             raise error.FilteredIndexError(rev)
         return super(changelog, self).flags(rev)

     def delayupdate(self, tr):
         "delay visibility of index updates to other readers"

         if not self._delayed:
             if len(self) == 0:
                 self._divert = True
                 if self._realopener.exists(self.indexfile + '.a'):
                     self._realopener.unlink(self.indexfile + '.a')
                 self.opener = _divertopener(self._realopener, self.indexfile)
             else:
                 self._delaybuf = []
                 self.opener = _delayopener(self._realopener, self.indexfile,
                                            self._delaybuf)
         self._delayed = True
         tr.addpending('cl-%i' % id(self), self._writepending)
         tr.addfinalize('cl-%i' % id(self), self._finalize)

     def _finalize(self, tr):
         "finalize index updates"
         self._delayed = False
         self.opener = self._realopener
         # move redirected index data back into place
         if self._divert:
             assert not self._delaybuf
             tmpname = self.indexfile + ".a"
             nfile = self.opener.open(tmpname)
             nfile.close()
             self.opener.rename(tmpname, self.indexfile, checkambig=True)
         elif self._delaybuf:
             fp = self.opener(self.indexfile, 'a', checkambig=True)
             fp.write("".join(self._delaybuf))
             fp.close()
             self._delaybuf = None
         self._divert = False
         # split when we're done
         self._enforceinlinesize(tr)

     def _writepending(self, tr):
         "create a file containing the unfinalized state for pretxnchangegroup"
         if self._delaybuf:
             # make a temporary copy of the index
             fp1 = self._realopener(self.indexfile)
             pendingfilename = self.indexfile + ".a"
             # register as a temp file to ensure cleanup on failure
             tr.registertmp(pendingfilename)
             # write existing data
             fp2 = self._realopener(pendingfilename, "w")
             fp2.write(fp1.read())
             # add pending data
             fp2.write("".join(self._delaybuf))
             fp2.close()
             # switch modes so finalize can simply rename
             self._delaybuf = None
             self._divert = True
             self.opener = _divertopener(self._realopener, self.indexfile)

         if self._divert:
             return True

         return False

     def _enforceinlinesize(self, tr, fp=None):
         if not self._delayed:
             revlog.revlog._enforceinlinesize(self, tr, fp)

     def read(self, node):
         """Obtain data from a parsed changelog revision.

         Returns a 6-tuple of:

            - manifest node in binary
            - author/user as a localstr
            - date as a 2-tuple of (time, timezone)
            - list of files
            - commit message as a localstr
            - dict of extra metadata

         Unless you need to access all fields, consider calling
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
         c = changelogrevision(self.revision(node))
         return (
             c.manifest,
             c.user,
             c.date,
             c.files,
             c.description,
             c.extra
         )

     def changelogrevision(self, nodeorrev):
         """Obtain a ``changelogrevision`` for a node or revision."""
         return changelogrevision(self.revision(nodeorrev))

     def readfiles(self, node):
         """
         short version of read that only returns the files modified by the cset
         """
         text = self.revision(node)
         if not text:
             return []
         last = text.index("\n\n")
         l = text[:last].split('\n')
         return l[3:]

     def add(self, manifest, files, desc, transaction, p1, p2,
-            user, date=None, extra=None, p1copies=None, p2copies=None):
+            user, date=None, extra=None, p1copies=None, p2copies=None,
+            filesadded=None, filesremoved=None):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
         # into a str object and the cached UTF-8 string is thus lost.
         user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

         user = user.strip()
         # An empty username or a username with a "\n" will make the
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
             raise error.StorageError(_("empty username"))
         if "\n" in user:
             raise error.StorageError(_("username %r contains a newline")
                                      % pycompat.bytestr(user))

         desc = stripdesc(desc)

         if date:
             parseddate = "%d %d" % dateutil.parsedate(date)
         else:
             parseddate = "%d %d" % dateutil.makedate()
         if extra:
             branch = extra.get("branch")
             if branch in ("default", ""):
                 del extra["branch"]
             elif branch in (".", "null", "tip"):
                 raise error.StorageError(_('the name \'%s\' is reserved')
                                          % branch)
-        if (p1copies is not None or p2copies is not None) and extra is None:
+        extrasentries = p1copies, p2copies, filesadded, filesremoved
+        if extra is None and any(x is not None for x in extrasentries):
             extra = {}
         if p1copies is not None:
             extra['p1copies'] = encodecopies(p1copies)
         if p2copies is not None:
             extra['p2copies'] = encodecopies(p2copies)
+        sortedfiles = sorted(files)
+        if filesadded is not None:
+            extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
+        if filesremoved is not None:
+            extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)

         if extra:
             extra = encodeextra(extra)
             parseddate = "%s %s" % (parseddate, extra)
-        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
+        l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
         text = "\n".join(l)
         return self.addrevision(text, transaction, len(self), p1, p2)

     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision

         This function exists because creating a changectx object
         just to access this is costly."""
         extra = self.read(rev)[5]
         return encoding.tolocal(extra.get("branch")), 'close' in extra

     def _nodeduplicatecallback(self, transaction, node):
         # keep track of revisions that got "re-added", e.g. unbundle of known rev.
         #
         # We track them in a list to preserve their order from the source bundle
         duplicates = transaction.changes.setdefault('revduplicates', [])
         duplicates.append(self.rev(node))
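
Reading these extras entries back requires reversing the mapping against the same sorted file list. A hypothetical decoder sketch, not part of the hunk shown above (the name decodefileindices and the error handling are assumptions, modeled on decodecopies):

def decodefileindices(files, data):
    try:
        subset = []
        if not data:
            # an empty entry means an empty subset
            return subset
        for strindex in data.split('\0'):
            i = int(strindex)
            if i < 0 or i >= len(files):
                # an index outside the file list means the extras entry was
                # written with some other syntax; treat it as unparsable
                return None
            subset.append(files[i])
        return subset
    except ValueError:
        # mirror decodecopies(): a colliding key written by another tool
        # with a different value syntax should not crash the reader
        return None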
@@ -1,3180 +1,3193 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )

 from .revlogutils import (
     constants as revlogconst,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()

 class _basefilecache(scmutil.filecache):
     """All filecache usage on a repo is done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)

 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)

 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))

     def join(self, obj, fname):
         return obj.sjoin(fname)

 class mixedrepostorecache(_basefilecache):
     """filecache for a mix of files in .hg/store and outside"""
     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         for path, location in pathsandlocations:
             _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == '':
             return obj.vfs.join(fname)
         else:
             if location != 'store':
                 raise error.ProgrammingError('unexpected location: %s' %
                                              location)
             return obj.sjoin(fname)

 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True

 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to the unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""
176 def wrapper(repo, *args, **kwargs):
176 def wrapper(repo, *args, **kwargs):
177 return orig(repo.unfiltered(), *args, **kwargs)
177 return orig(repo.unfiltered(), *args, **kwargs)
178 return wrapper
178 return wrapper
179
179
180 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
180 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
181 'unbundle'}
181 'unbundle'}
182 legacycaps = moderncaps.union({'changegroupsubset'})
182 legacycaps = moderncaps.union({'changegroupsubset'})
183
183
184 @interfaceutil.implementer(repository.ipeercommandexecutor)
184 @interfaceutil.implementer(repository.ipeercommandexecutor)
185 class localcommandexecutor(object):
185 class localcommandexecutor(object):
186 def __init__(self, peer):
186 def __init__(self, peer):
187 self._peer = peer
187 self._peer = peer
188 self._sent = False
188 self._sent = False
189 self._closed = False
189 self._closed = False
190
190
191 def __enter__(self):
191 def __enter__(self):
192 return self
192 return self
193
193
194 def __exit__(self, exctype, excvalue, exctb):
194 def __exit__(self, exctype, excvalue, exctb):
195 self.close()
195 self.close()
196
196
197 def callcommand(self, command, args):
197 def callcommand(self, command, args):
198 if self._sent:
198 if self._sent:
199 raise error.ProgrammingError('callcommand() cannot be used after '
199 raise error.ProgrammingError('callcommand() cannot be used after '
200 'sendcommands()')
200 'sendcommands()')
201
201
202 if self._closed:
202 if self._closed:
203 raise error.ProgrammingError('callcommand() cannot be used after '
203 raise error.ProgrammingError('callcommand() cannot be used after '
204 'close()')
204 'close()')
205
205
206 # We don't need to support anything fancy. Just call the named
206 # We don't need to support anything fancy. Just call the named
207 # method on the peer and return a resolved future.
207 # method on the peer and return a resolved future.
208 fn = getattr(self._peer, pycompat.sysstr(command))
208 fn = getattr(self._peer, pycompat.sysstr(command))
209
209
210 f = pycompat.futures.Future()
210 f = pycompat.futures.Future()
211
211
212 try:
212 try:
213 result = fn(**pycompat.strkwargs(args))
213 result = fn(**pycompat.strkwargs(args))
214 except Exception:
214 except Exception:
215 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
215 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
216 else:
216 else:
217 f.set_result(result)
217 f.set_result(result)
218
218
219 return f
219 return f
220
220
221 def sendcommands(self):
221 def sendcommands(self):
222 self._sent = True
222 self._sent = True
223
223
224 def close(self):
224 def close(self):
225 self._closed = True
225 self._closed = True
226
226
227 @interfaceutil.implementer(repository.ipeercommands)
227 @interfaceutil.implementer(repository.ipeercommands)
228 class localpeer(repository.peer):
228 class localpeer(repository.peer):
229 '''peer for a local repo; reflects only the most recent API'''
229 '''peer for a local repo; reflects only the most recent API'''
230
230
231 def __init__(self, repo, caps=None):
231 def __init__(self, repo, caps=None):
232 super(localpeer, self).__init__()
232 super(localpeer, self).__init__()
233
233
234 if caps is None:
234 if caps is None:
235 caps = moderncaps.copy()
235 caps = moderncaps.copy()
236 self._repo = repo.filtered('served')
236 self._repo = repo.filtered('served')
237 self.ui = repo.ui
237 self.ui = repo.ui
238 self._caps = repo._restrictcapabilities(caps)
238 self._caps = repo._restrictcapabilities(caps)
239
239
240 # Begin of _basepeer interface.
240 # Begin of _basepeer interface.
241
241
242 def url(self):
242 def url(self):
243 return self._repo.url()
243 return self._repo.url()
244
244
245 def local(self):
245 def local(self):
246 return self._repo
246 return self._repo
247
247
248 def peer(self):
248 def peer(self):
249 return self
249 return self
250
250
251 def canpush(self):
251 def canpush(self):
252 return True
252 return True
253
253
254 def close(self):
254 def close(self):
255 self._repo.close()
255 self._repo.close()
256
256
257 # End of _basepeer interface.
257 # End of _basepeer interface.
258
258
259 # Begin of _basewirecommands interface.
259 # Begin of _basewirecommands interface.
260
260
261 def branchmap(self):
261 def branchmap(self):
262 return self._repo.branchmap()
262 return self._repo.branchmap()
263
263
264 def capabilities(self):
264 def capabilities(self):
265 return self._caps
265 return self._caps
266
266
267 def clonebundles(self):
267 def clonebundles(self):
268 return self._repo.tryread('clonebundles.manifest')
268 return self._repo.tryread('clonebundles.manifest')
269
269
270 def debugwireargs(self, one, two, three=None, four=None, five=None):
270 def debugwireargs(self, one, two, three=None, four=None, five=None):
271 """Used to test argument passing over the wire"""
271 """Used to test argument passing over the wire"""
272 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
272 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
273 pycompat.bytestr(four),
273 pycompat.bytestr(four),
274 pycompat.bytestr(five))
274 pycompat.bytestr(five))
275
275
276 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
276 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
277 **kwargs):
277 **kwargs):
278 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
278 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
279 common=common, bundlecaps=bundlecaps,
279 common=common, bundlecaps=bundlecaps,
280 **kwargs)[1]
280 **kwargs)[1]
281 cb = util.chunkbuffer(chunks)
281 cb = util.chunkbuffer(chunks)
282
282
283 if exchange.bundle2requested(bundlecaps):
283 if exchange.bundle2requested(bundlecaps):
284 # When requesting a bundle2, getbundle returns a stream to make the
284 # When requesting a bundle2, getbundle returns a stream to make the
285 # wire level function happier. We need to build a proper object
285 # wire level function happier. We need to build a proper object
286 # from it in local peer.
286 # from it in local peer.
287 return bundle2.getunbundler(self.ui, cb)
287 return bundle2.getunbundler(self.ui, cb)
288 else:
288 else:
289 return changegroup.getunbundler('01', cb, None)
289 return changegroup.getunbundler('01', cb, None)
290
290
291 def heads(self):
291 def heads(self):
292 return self._repo.heads()
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# Each function receives the set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
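
# A minimal sketch of how an extension might use ``featuresetupfuncs``. The
# extension layout and the requirement string below are hypothetical, not
# part of this module:
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       # advertise that this extension can open repos with our requirement
#       supported.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
#
# Only functions whose defining module belongs to a loaded extension are
# actually called (see ``gathersupportedrequirements()`` below).
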
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
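    # A minimal sketch (hypothetical extension code, not part of this module)
    # of wrapping a factory function as the docstring above suggests:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def wrapmakefilestorage(orig, *args, **kwargs):
    #       typ = orig(*args, **kwargs)
    #       # return a subclass of ``typ`` here to customize file storage
    #       return typ
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo, 'makefilestorage',
    #                               wrapmakefilestorage)
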
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # The .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

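    # For illustration, this dynamic type creation is plain Python; type
    # names built this way are not restricted to valid identifiers:
    #
    #   >>> cls = type('derivedrepo:/tmp/repo<store>', (object,), {})
    #   >>> cls.__name__
    #   'derivedrepo:/tmp/repo<store>'
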
    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

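# A minimal sketch (hypothetical extension code) of monkeypatching
# ``loadhgrc`` to pull configs from an extra file; 'hgrc-extra' is a made-up
# file name used only for illustration:
#
#   from mercurial import extensions, localrepo
#
#   def wraploadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', wraploadhgrc)
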
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize, and returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

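# For illustration (requirement names here are hypothetical): validation
# passes silently when every requirement is known, and raises otherwise:
#
#   ensurerequirementsrecognized({b'revlogv1'}, {b'revlogv1', b'store'})
#   # -> returns None
#   ensurerequirementsrecognized({b'fancyfeature'}, {b'revlogv1'})
#   # -> raises error.RequirementError naming 'fancyfeature'
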
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

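# For illustration, the mapping implemented by ``makestore()``:
#
#   'store' and 'fncache' in requirements -> storemod.fncachestore
#       (with dot-encoding if 'dotencode' is also present)
#   only 'store' in requirements          -> storemod.encodedstore
#   neither (very old repos)              -> storemod.basicstore
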
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

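# For illustration, the storage options resolved above map to hgrc settings
# such as the following (the values shown are hypothetical, not defaults):
#
#   [storage]
#   revlog.optimize-delta-parent-choice = yes
#   revlog.zstd.level = 10
#
#   [experimental]
#   maxdeltachainspan = 4MB
#   sparse-read = yes
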
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

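    # A minimal sketch of obtaining a usable repository object as the
    # docstring above recommends (the path is hypothetical):
    #
    #   from mercurial import hg, ui as uimod
    #
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #   print(len(repo))  # number of changesets
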
    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

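    # Note: the vfs/svfs wards above and below are only installed when one of
    # these (real) config knobs is enabled, e.g. in an hgrc:
    #
    #   [devel]
    #   all-warnings = yes
    #
    # or ``check-locks = yes`` to enable just the locking checks.
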
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

1129 def close(self):
1129 def close(self):
1130 self._writecaches()
1130 self._writecaches()
1131
1131
1132 def _writecaches(self):
1132 def _writecaches(self):
1133 if self._revbranchcache:
1133 if self._revbranchcache:
1134 self._revbranchcache.write()
1134 self._revbranchcache.write()
1135
1135
1136 def _restrictcapabilities(self, caps):
1136 def _restrictcapabilities(self, caps):
1137 if self.ui.configbool('experimental', 'bundle2-advertise'):
1137 if self.ui.configbool('experimental', 'bundle2-advertise'):
1138 caps = set(caps)
1138 caps = set(caps)
1139 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1139 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1140 role='client'))
1140 role='client'))
1141 caps.add('bundle2=' + urlreq.quote(capsblob))
1141 caps.add('bundle2=' + urlreq.quote(capsblob))
1142 return caps
1142 return caps
1143
1143
1144 def _writerequirements(self):
1144 def _writerequirements(self):
1145 scmutil.writerequires(self.vfs, self.requirements)
1145 scmutil.writerequires(self.vfs, self.requirements)
1146
1146
1147 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1147 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1148 # self -> auditor -> self._checknested -> self
1148 # self -> auditor -> self._checknested -> self
1149
1149
1150 @property
1150 @property
1151 def auditor(self):
1151 def auditor(self):
1152 # This is only used by context.workingctx.match in order to
1152 # This is only used by context.workingctx.match in order to
1153 # detect files in subrepos.
1153 # detect files in subrepos.
1154 return pathutil.pathauditor(self.root, callback=self._checknested)
1154 return pathutil.pathauditor(self.root, callback=self._checknested)
1155
1155
1156 @property
1156 @property
1157 def nofsauditor(self):
1157 def nofsauditor(self):
1158 # This is only used by context.basectx.match in order to detect
1158 # This is only used by context.basectx.match in order to detect
1159 # files in subrepos.
1159 # files in subrepos.
1160 return pathutil.pathauditor(self.root, callback=self._checknested,
1160 return pathutil.pathauditor(self.root, callback=self._checknested,
1161 realfs=False, cached=True)
1161 realfs=False, cached=True)
1162
1162
1163 def _checknested(self, path):
1163 def _checknested(self, path):
1164 """Determine if path is a legal nested repository."""
1164 """Determine if path is a legal nested repository."""
1165 if not path.startswith(self.root):
1165 if not path.startswith(self.root):
1166 return False
1166 return False
1167 subpath = path[len(self.root) + 1:]
1167 subpath = path[len(self.root) + 1:]
1168 normsubpath = util.pconvert(subpath)
1168 normsubpath = util.pconvert(subpath)
1169
1169
1170 # XXX: Checking against the current working copy is wrong in
1170 # XXX: Checking against the current working copy is wrong in
1171 # the sense that it can reject things like
1171 # the sense that it can reject things like
1172 #
1172 #
1173 # $ hg cat -r 10 sub/x.txt
1173 # $ hg cat -r 10 sub/x.txt
1174 #
1174 #
1175 # if sub/ is no longer a subrepository in the working copy
1175 # if sub/ is no longer a subrepository in the working copy
1176 # parent revision.
1176 # parent revision.
1177 #
1177 #
1178 # However, it can of course also allow things that would have
1178 # However, it can of course also allow things that would have
1179 # been rejected before, such as the above cat command if sub/
1179 # been rejected before, such as the above cat command if sub/
1180 # is a subrepository now, but was a normal directory before.
1180 # is a subrepository now, but was a normal directory before.
1181 # The old path auditor would have rejected by mistake since it
1181 # The old path auditor would have rejected by mistake since it
1182 # panics when it sees sub/.hg/.
1182 # panics when it sees sub/.hg/.
1183 #
1183 #
1184 # All in all, checking against the working copy seems sensible
1184 # All in all, checking against the working copy seems sensible
1185 # since we want to prevent access to nested repositories on
1185 # since we want to prevent access to nested repositories on
1186 # the filesystem *now*.
1186 # the filesystem *now*.
1187 ctx = self[None]
1187 ctx = self[None]
1188 parts = util.splitpath(subpath)
1188 parts = util.splitpath(subpath)
1189 while parts:
1189 while parts:
1190 prefix = '/'.join(parts)
1190 prefix = '/'.join(parts)
1191 if prefix in ctx.substate:
1191 if prefix in ctx.substate:
1192 if prefix == normsubpath:
1192 if prefix == normsubpath:
1193 return True
1193 return True
1194 else:
1194 else:
1195 sub = ctx.sub(prefix)
1195 sub = ctx.sub(prefix)
1196 return sub.checknested(subpath[len(prefix) + 1:])
1196 return sub.checknested(subpath[len(prefix) + 1:])
1197 else:
1197 else:
1198 parts.pop()
1198 parts.pop()
1199 return False
1199 return False
1200
1200
1201 def peer(self):
1201 def peer(self):
1202 return localpeer(self) # not cached to avoid reference cycle
1202 return localpeer(self) # not cached to avoid reference cycle
1203
1203
1204 def unfiltered(self):
1204 def unfiltered(self):
1205 """Return unfiltered version of the repository
1205 """Return unfiltered version of the repository
1206
1206
1207 Intended to be overwritten by filtered repo."""
1207 Intended to be overwritten by filtered repo."""
1208 return self
1208 return self
1209
1209
1210 def filtered(self, name, visibilityexceptions=None):
1210 def filtered(self, name, visibilityexceptions=None):
1211 """Return a filtered version of a repository
1211 """Return a filtered version of a repository
1212
1212
1213 The `name` parameter is the identifier of the requested view. This
1213 The `name` parameter is the identifier of the requested view. This
1214 will return a repoview object set "exactly" to the specified view.
1214 will return a repoview object set "exactly" to the specified view.
1215
1215
1216 This function does not apply recursive filtering to a repository. For
1216 This function does not apply recursive filtering to a repository. For
1217 example calling `repo.filtered("served")` will return a repoview using
1217 example calling `repo.filtered("served")` will return a repoview using
1218 the "served" view, regardless of the initial view used by `repo`.
1218 the "served" view, regardless of the initial view used by `repo`.
1219
1219
        In other words, there is always only one level of `repoview`
        "filtering".
1221 """
1221 """
1222 if self._extrafilterid is not None and '%' not in name:
1222 if self._extrafilterid is not None and '%' not in name:
1223 name = name + '%' + self._extrafilterid
1223 name = name + '%' + self._extrafilterid
1224
1224
1225 cls = repoview.newtype(self.unfiltered().__class__)
1225 cls = repoview.newtype(self.unfiltered().__class__)
1226 return cls(self, name, visibilityexceptions)
1226 return cls(self, name, visibilityexceptions)
1227
1227
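    # Illustrative usage of the view API above (a sketch, not part of the
    # original source; 'repo' stands for any localrepository instance):
    #
    #     served = repo.filtered('served')  # view without hidden/secret csets
    #     base = served.unfiltered()        # always hops back to the base repo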
    @mixedrepostorecache(('bookmarks', ''), ('bookmarks.current', ''),
                         ('bookmarks', 'store'), ('00changelog.i', 'store'))
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

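    # A minimal sketch of how the narrow matcher composes with a caller's
    # matcher (illustrative, not part of the original source; assumes 'repo'
    # is a localrepository and 'src' exists in the tree):
    #
    #     m = matchmod.match(repo.root, '', ['path:src'])
    #     narrowed = repo.narrowmatch(m)   # intersection of both matchers
    #     narrowed('src/a.c')              # True only if inside the narrowspec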
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

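    # Indexing examples for the lookup logic above (illustrative only):
    #
    #     repo[None]     # workingctx for the working directory
    #     repo['tip']    # changectx for the tip revision
    #     repo[0]        # changectx for revision number 0
    #     repo[0:3]      # list of changectx, filtered revisions skipped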
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
1395 """
1395 """
1396 try:
1396 try:
1397 self[changeid]
1397 self[changeid]
1398 return True
1398 return True
1399 except error.RepoLookupError:
1399 except error.RepoLookupError:
1400 return False
1400 return False
1401
1401
1402 def __nonzero__(self):
1402 def __nonzero__(self):
1403 return True
1403 return True
1404
1404
1405 __bool__ = __nonzero__
1405 __bool__ = __nonzero__
1406
1406
1407 def __len__(self):
1407 def __len__(self):
1408 # no need to pay the cost of repoview.changelog
1408 # no need to pay the cost of repoview.changelog
1409 unfi = self.unfiltered()
1409 unfi = self.unfiltered()
1410 return len(unfi.changelog)
1410 return len(unfi.changelog)
1411
1411
1412 def __iter__(self):
1412 def __iter__(self):
1413 return iter(self.changelog)
1413 return iter(self.changelog)
1414
1414
1415 def revs(self, expr, *args):
1415 def revs(self, expr, *args):
1416 '''Find revisions matching a revset.
1416 '''Find revisions matching a revset.
1417
1417
1418 The revset is specified as a string ``expr`` that may contain
1418 The revset is specified as a string ``expr`` that may contain
1419 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1419 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1420
1420
1421 Revset aliases from the configuration are not expanded. To expand
1421 Revset aliases from the configuration are not expanded. To expand
1422 user aliases, consider calling ``scmutil.revrange()`` or
1422 user aliases, consider calling ``scmutil.revrange()`` or
1423 ``repo.anyrevs([expr], user=True)``.
1423 ``repo.anyrevs([expr], user=True)``.
1424
1424
1425 Returns a revset.abstractsmartset, which is a list-like interface
1425 Returns a revset.abstractsmartset, which is a list-like interface
1426 that contains integer revisions.
1426 that contains integer revisions.
1427 '''
1427 '''
1428 tree = revsetlang.spectree(expr, *args)
1428 tree = revsetlang.spectree(expr, *args)
1429 return revset.makematcher(tree)(self)
1429 return revset.makematcher(tree)(self)
1430
1430
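    # Hedged examples of the %-formatting accepted by revs() (see
    # ``revsetlang.formatspec`` for the full list of specifiers):
    #
    #     repo.revs('heads(%ld)', [1, 2, 3])   # %ld escapes a list of revs
    #     repo.revs('branch(%s)', 'default')   # %s escapes a string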
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

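    # Sketch of anyrevs() with a local alias overriding user configuration
    # (illustrative; the alias name 'mine' is made up for this example):
    #
    #     repo.anyrevs(['mine()'], user=True,
    #                  localalias={'mine': 'author(alice)'})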
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

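    # Example use of the mapping described above (illustrative only):
    #
    #     repo.branchmap().branchtip('default')  # tip node of branch 'default'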
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

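    # The filter patterns loaded above come from hgrc sections of the same
    # name. A hypothetical configuration (not from this file; the command is
    # only an example) would look like:
    #
    #     [encode]
    #     *.txt = tr '\r' ''
    #
    # Each entry maps a file pattern to a shell command (or a registered data
    # filter name); _filter() applies the first matching pattern to the data.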
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
1758 """return the current transaction or None if non exists"""
1758 """return the current transaction or None if non exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with the case where a transaction exposes
        # new heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
1826 # "-R": tag is removed,
1826 # "-R": tag is removed,
1827 # "+A": tag is added,
1827 # "+A": tag is added,
1828 # "-M": tag is moved (old value),
1828 # "-M": tag is moved (old value),
1829 # "+M": tag is moved (new value),
1829 # "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
1924 # note: writing the fncache only during finalize mean that the file is
1924 # note: writing the fncache only during finalize mean that the file is
1925 # outdated when running hooks. As fncache is used for streaming clone,
1925 # outdated when running hooks. As fncache is used for streaming clone,
1926 # this is not expected to break anything that happen during the hooks.
1926 # this is not expected to break anything that happen during the hooks.
1927 tr.addfinalize('flush-fncache', self.store.write)
1927 tr.addfinalize('flush-fncache', self.store.write)
1928 def txnclosehook(tr2):
1928 def txnclosehook(tr2):
1929 """To be run if transaction is successful, will schedule a hook run
1929 """To be run if transaction is successful, will schedule a hook run
1930 """
1930 """
1931 # Don't reference tr2 in hook() so we don't hold a reference.
1931 # Don't reference tr2 in hook() so we don't hold a reference.
1932 # This reduces memory consumption when there are multiple
1932 # This reduces memory consumption when there are multiple
1933 # transactions per lock. This can likely go away if issue5045
1933 # transactions per lock. This can likely go away if issue5045
1934 # fixes the function accumulation.
1934 # fixes the function accumulation.
1935 hookargs = tr2.hookargs
1935 hookargs = tr2.hookargs
1936
1936
1937 def hookfunc():
1937 def hookfunc():
1938 repo = reporef()
1938 repo = reporef()
1939 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1939 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1940 bmchanges = sorted(tr.changes['bookmarks'].items())
1940 bmchanges = sorted(tr.changes['bookmarks'].items())
1941 for name, (old, new) in bmchanges:
1941 for name, (old, new) in bmchanges:
1942 args = tr.hookargs.copy()
1942 args = tr.hookargs.copy()
1943 args.update(bookmarks.preparehookargs(name, old, new))
1943 args.update(bookmarks.preparehookargs(name, old, new))
1944 repo.hook('txnclose-bookmark', throw=False,
1944 repo.hook('txnclose-bookmark', throw=False,
1945 **pycompat.strkwargs(args))
1945 **pycompat.strkwargs(args))
1946
1946
1947 if hook.hashook(repo.ui, 'txnclose-phase'):
1947 if hook.hashook(repo.ui, 'txnclose-phase'):
1948 cl = repo.unfiltered().changelog
1948 cl = repo.unfiltered().changelog
1949 phasemv = sorted(tr.changes['phases'].items())
1949 phasemv = sorted(tr.changes['phases'].items())
1950 for rev, (old, new) in phasemv:
1950 for rev, (old, new) in phasemv:
1951 args = tr.hookargs.copy()
1951 args = tr.hookargs.copy()
1952 node = hex(cl.node(rev))
1952 node = hex(cl.node(rev))
1953 args.update(phases.preparehookargs(node, old, new))
1953 args.update(phases.preparehookargs(node, old, new))
1954 repo.hook('txnclose-phase', throw=False,
1954 repo.hook('txnclose-phase', throw=False,
1955 **pycompat.strkwargs(args))
1955 **pycompat.strkwargs(args))
1956
1956
1957 repo.hook('txnclose', throw=False,
1957 repo.hook('txnclose', throw=False,
1958 **pycompat.strkwargs(hookargs))
1958 **pycompat.strkwargs(hookargs))
1959 reporef()._afterlock(hookfunc)
1959 reporef()._afterlock(hookfunc)
1960 tr.addfinalize('txnclose-hook', txnclosehook)
1960 tr.addfinalize('txnclose-hook', txnclosehook)
1961 # Include a leading "-" to make it happen before the transaction summary
1961 # Include a leading "-" to make it happen before the transaction summary
1962 # reports registered via scmutil.registersummarycallback() whose names
1962 # reports registered via scmutil.registersummarycallback() whose names
1963 # are 00-txnreport etc. That way, the caches will be warm when the
1963 # are 00-txnreport etc. That way, the caches will be warm when the
1964 # callbacks run.
1964 # callbacks run.
1965 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1965 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1966 def txnaborthook(tr2):
1966 def txnaborthook(tr2):
1967 """To be run if transaction is aborted
1967 """To be run if transaction is aborted
1968 """
1968 """
1969 reporef().hook('txnabort', throw=False,
1969 reporef().hook('txnabort', throw=False,
1970 **pycompat.strkwargs(tr2.hookargs))
1970 **pycompat.strkwargs(tr2.hookargs))
1971 tr.addabort('txnabort-hook', txnaborthook)
1971 tr.addabort('txnabort-hook', txnaborthook)
1972 # avoid eager cache invalidation. in-memory data should be identical
1972 # avoid eager cache invalidation. in-memory data should be identical
1973 # to stored data if transaction has no error.
1973 # to stored data if transaction has no error.
1974 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1974 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1975 self._transref = weakref.ref(tr)
1975 self._transref = weakref.ref(tr)
1976 scmutil.registersummarycallback(self, tr, desc)
1976 scmutil.registersummarycallback(self, tr, desc)
1977 return tr
1977 return tr
1978
1978
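    # Illustrative usage sketch (not part of the original file): callers are
    # expected to take the store lock before opening a transaction; the
    # transaction description below is hypothetical.
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:
    #             ...  # mutate the store; hooks and callbacks fire on close
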
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

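    # Illustrative sketch (assumption: undoname() maps a journal file name to
    # its undo counterpart, as the 'journal.*' -> 'undo.*' rename pairs used
    # elsewhere in this file suggest):
    #
    #     undoname('journal.dirstate')  ->  'undo.dirstate'
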
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

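    # Worked example of the 'journal.desc' format written above ("%d\n%s\n"):
    # for a repository with 42 revisions and a transaction described as
    # 'commit', the file contains (numbers illustrative only):
    #
    #     42
    #     commit
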
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

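    # Usage sketch (assumption: this is the code path behind 'hg recover'):
    #
    #     recovered = repo.recover()  # True if a journal was rolled back
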
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

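    # Usage sketch (hypothetical call): preview what a rollback would undo
    # without touching the store.
    #
    #     repo.rollback(dryrun=True)
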
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

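    # Usage sketch (hypothetical call): warm every cache this method knows
    # about, including the lazily loaded ones.
    #
    #     repo.updatecaches(full=True)
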
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

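    # Illustrative note: the three entry points differ in scope. A minimal
    # sketch of when each applies (calls are hypothetical):
    #
    #     repo.invalidatedirstate()  # non-store: the dirstate only
    #     repo.invalidate()          # store and non-store, except dirstate
    #     repo.invalidateall()       # both of the above
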
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

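    # Configuration sketch: the waiting behavior above is driven by these
    # hgrc settings (the values shown are illustrative, not the defaults):
    #
    #     [ui]
    #     timeout = 600      # seconds to wait for a lock before giving up
    #     timeout.warn = 10  # seconds before warning that we are waiting
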
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

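    # Usage sketch: this is how the commit path below defers its 'commit'
    # hook until the locks are released (the callback name is illustrative):
    #
    #     def commithook():
    #         ...  # runs once the outermost lock is released
    #     repo._afterlock(commithook)
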
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

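    # Usage sketch of the documented lock order ('wlock' strictly before
    # 'lock'), as the commit path below also does:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to modify both .hg and .hg/store here
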
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisitions. Such
        # acquisitions would not cause a deadlock, as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
                    includecopymeta):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if ((fparent1 != nullid and
                     manifest1.flags(fname) != fctx.flags()) or
                    (fparent2 != nullid and
                     manifest2.flags(fname) != fctx.flags())):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
                if includecopymeta:
                    meta["copy"] = cfname
                    meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

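    # Worked example of the copy metadata written above (values illustrative):
    # when 'bar' was copied from 'foo', the new filelog revision carries
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex nodeid of foo>'}
    #
    # and its first parent is set to nullid, so readers know to consult the
    # copy data instead.
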
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook():
                # hack for commands that use a temporary commit (e.g. histedit):
                # the temporary commit may get stripped before the hook is run
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=hex(ret), parent1=hookp1,
                              parent2=hookp2)
            self._afterlock(commithook)
            return ret

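    # Usage sketch (hypothetical values): the high-level entry point used for
    # committing the working directory.
    #
    #     node = repo.commit(text='fix parser', user='alice <a@example.com>')
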
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config('experimental', 'copies.write-to')
        writefilecopymeta = writecopiesto != 'changeset-only'
        writechangesetcopy = (writecopiesto in
                              ('changeset-only', 'compatibility'))
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed,
                                                    writefilecopymeta)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())

                    if writechangesetcopy:
                        filesadded = [f for f in changed
                                      if not (f in m1 or f in m2)]
                        filesremoved = removed
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == 'changeset-only':
                # If writing only to changeset extras, use None to indicate
                # that no entry should be written. If writing to both, write
                # an empty entry to prevent the reader from falling back to
                # reading filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy(),
                                   p1copies, p2copies, filesadded, filesremoved)
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

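    # Configuration sketch for the copy-metadata behavior read above via
    # ui.config('experimental', 'copies.write-to'). 'changeset-only' and
    # 'compatibility' are the values this method tests for; 'filelog-only'
    # as the remaining (default) value is an assumption.
    #
    #     [experimental]
    #     copies.write-to = compatibility
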
2708 @unfilteredmethod
2721 @unfilteredmethod
2709 def destroying(self):
2722 def destroying(self):
2710 '''Inform the repository that nodes are about to be destroyed.
2723 '''Inform the repository that nodes are about to be destroyed.
2711 Intended for use by strip and rollback, so there's a common
2724 Intended for use by strip and rollback, so there's a common
2712 place for anything that has to be done before destroying history.
2725 place for anything that has to be done before destroying history.
2713
2726
2714 This is mostly useful for saving state that is in memory and waiting
2727 This is mostly useful for saving state that is in memory and waiting
2715 to be flushed when the current lock is released. Because a call to
2728 to be flushed when the current lock is released. Because a call to
2716 destroyed is imminent, the repo will be invalidated, causing those
2729 destroyed is imminent, the repo will be invalidated, causing those
2717 changes either to stay in memory (waiting for the next unlock) or to
2730 changes either to stay in memory (waiting for the next unlock) or to
2718 vanish completely.
2731 vanish completely.
2719 '''
2732 '''
2720 # When using the same lock to commit and strip, the phasecache is left
2733 # When using the same lock to commit and strip, the phasecache is left
2721 # dirty after committing. Then when we strip, the repo is invalidated,
2734 # dirty after committing. Then when we strip, the repo is invalidated,
2722 # causing those changes to disappear.
2735 # causing those changes to disappear.
2723 if '_phasecache' in vars(self):
2736 if '_phasecache' in vars(self):
2724 self._phasecache.write()
2737 self._phasecache.write()
2725
2738
2726 @unfilteredmethod
2739 @unfilteredmethod
2727 def destroyed(self):
2740 def destroyed(self):
2728 '''Inform the repository that nodes have been destroyed.
2741 '''Inform the repository that nodes have been destroyed.
2729 Intended for use by strip and rollback, so there's a common
2742 Intended for use by strip and rollback, so there's a common
2730 place for anything that has to be done after destroying history.
2743 place for anything that has to be done after destroying history.
2731 '''
2744 '''
2732 # When one tries to:
2745 # When one tries to:
2733 # 1) destroy nodes thus calling this method (e.g. strip)
2746 # 1) destroy nodes thus calling this method (e.g. strip)
2734 # 2) use phasecache somewhere (e.g. commit)
2747 # 2) use phasecache somewhere (e.g. commit)
2735 #
2748 #
2736 # then 2) will fail because the phasecache contains nodes that were
2749 # then 2) will fail because the phasecache contains nodes that were
2737 # removed. We can either remove phasecache from the filecache,
2750 # removed. We can either remove phasecache from the filecache,
2738 # causing it to reload next time it is accessed, or simply filter
2751 # causing it to reload next time it is accessed, or simply filter
2739 # the removed nodes now and write the updated cache.
2752 # the removed nodes now and write the updated cache.
2740 self._phasecache.filterunknown(self)
2753 self._phasecache.filterunknown(self)
2741 self._phasecache.write()
2754 self._phasecache.write()
2742
2755
2743 # refresh all repository caches
2756 # refresh all repository caches
2744 self.updatecaches()
2757 self.updatecaches()
2745
2758
2746 # Ensure the persistent tag cache is updated. Doing it now
2759 # Ensure the persistent tag cache is updated. Doing it now
2747 # means that the tag cache only has to worry about destroyed
2760 # means that the tag cache only has to worry about destroyed
2748 # heads immediately after a strip/rollback. That in turn
2761 # heads immediately after a strip/rollback. That in turn
2749 # guarantees that "cachetip == currenttip" (comparing both rev
2762 # guarantees that "cachetip == currenttip" (comparing both rev
2750 # and node) always means no nodes have been added or destroyed.
2763 # and node) always means no nodes have been added or destroyed.
2751
2764
2752 # XXX this is suboptimal when qrefresh'ing: we strip the current
2765 # XXX this is suboptimal when qrefresh'ing: we strip the current
2753 # head, refresh the tag cache, then immediately add a new head.
2766 # head, refresh the tag cache, then immediately add a new head.
2754 # But I think doing it this way is necessary for the "instant
2767 # But I think doing it this way is necessary for the "instant
2755 # tag cache retrieval" case to work.
2768 # tag cache retrieval" case to work.
2756 self.invalidate()
2769 self.invalidate()
2757
2770
2758 def status(self, node1='.', node2=None, match=None,
2771 def status(self, node1='.', node2=None, match=None,
2759 ignored=False, clean=False, unknown=False,
2772 ignored=False, clean=False, unknown=False,
2760 listsubrepos=False):
2773 listsubrepos=False):
2761 '''a convenience method that calls node1.status(node2)'''
2774 '''a convenience method that calls node1.status(node2)'''
2762 return self[node1].status(node2, match, ignored, clean, unknown,
2775 return self[node1].status(node2, match, ignored, clean, unknown,
2763 listsubrepos)
2776 listsubrepos)
2764
2777
2765 def addpostdsstatus(self, ps):
2778 def addpostdsstatus(self, ps):
2766 """Add a callback to run within the wlock, at the point at which status
2779 """Add a callback to run within the wlock, at the point at which status
2767 fixups happen.
2780 fixups happen.
2768
2781
2769 On status completion, callback(wctx, status) will be called with the
2782 On status completion, callback(wctx, status) will be called with the
2770 wlock held, unless the dirstate has changed from underneath or the wlock
2783 wlock held, unless the dirstate has changed from underneath or the wlock
2771 couldn't be grabbed.
2784 couldn't be grabbed.
2772
2785
2773 Callbacks should not capture and use a cached copy of the dirstate --
2786 Callbacks should not capture and use a cached copy of the dirstate --
2774 it might change in the meanwhile. Instead, they should access the
2787 it might change in the meanwhile. Instead, they should access the
2775 dirstate via wctx.repo().dirstate.
2788 dirstate via wctx.repo().dirstate.
2776
2789
2777 This list is emptied out after each status run -- extensions should
2790 This list is emptied out after each status run -- extensions should
2778 make sure they add to this list each time dirstate.status is called.
2791 make sure they add to this list each time dirstate.status is called.
2779 Extensions should also make sure they don't call this for statuses
2792 Extensions should also make sure they don't call this for statuses
2780 that don't involve the dirstate.
2793 that don't involve the dirstate.
2781 """
2794 """
2782
2795
2783 # The list is located here for uniqueness reasons -- it is actually
2796 # The list is located here for uniqueness reasons -- it is actually
2784 # managed by the workingctx, but that isn't unique per-repo.
2797 # managed by the workingctx, but that isn't unique per-repo.
2785 self._postdsstatus.append(ps)
2798 self._postdsstatus.append(ps)
2786
2799
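# A hedged extension sketch (callback name and message are invented)
# showing the contract described in the docstring above:

def _logdirty(wctx, status):
    # runs with the wlock held; reach the dirstate through wctx.repo()
    # instead of capturing a cached copy
    wctx.repo().ui.debug(b'%d modified files after status\n'
                         % len(status.modified))

def reposetup(ui, repo):
    # one-shot registration: the list is emptied after each status run
    repo.addpostdsstatus(_logdirty)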
2787 def postdsstatus(self):
2800 def postdsstatus(self):
2788 """Used by workingctx to get the list of post-dirstate-status hooks."""
2801 """Used by workingctx to get the list of post-dirstate-status hooks."""
2789 return self._postdsstatus
2802 return self._postdsstatus
2790
2803
2791 def clearpostdsstatus(self):
2804 def clearpostdsstatus(self):
2792 """Used by workingctx to clear post-dirstate-status hooks."""
2805 """Used by workingctx to clear post-dirstate-status hooks."""
2793 del self._postdsstatus[:]
2806 del self._postdsstatus[:]
2794
2807
2795 def heads(self, start=None):
2808 def heads(self, start=None):
2796 if start is None:
2809 if start is None:
2797 cl = self.changelog
2810 cl = self.changelog
2798 headrevs = reversed(cl.headrevs())
2811 headrevs = reversed(cl.headrevs())
2799 return [cl.node(rev) for rev in headrevs]
2812 return [cl.node(rev) for rev in headrevs]
2800
2813
2801 heads = self.changelog.heads(start)
2814 heads = self.changelog.heads(start)
2802 # sort the output in rev descending order
2815 # sort the output in rev descending order
2803 return sorted(heads, key=self.changelog.rev, reverse=True)
2816 return sorted(heads, key=self.changelog.rev, reverse=True)
2804
2817
2805 def branchheads(self, branch=None, start=None, closed=False):
2818 def branchheads(self, branch=None, start=None, closed=False):
2806 '''return a (possibly filtered) list of heads for the given branch
2819 '''return a (possibly filtered) list of heads for the given branch
2807
2820
2808 Heads are returned in topological order, from newest to oldest.
2821 Heads are returned in topological order, from newest to oldest.
2809 If branch is None, use the dirstate branch.
2822 If branch is None, use the dirstate branch.
2810 If start is not None, return only heads reachable from start.
2823 If start is not None, return only heads reachable from start.
2811 If closed is True, return heads that are marked as closed as well.
2824 If closed is True, return heads that are marked as closed as well.
2812 '''
2825 '''
2813 if branch is None:
2826 if branch is None:
2814 branch = self[None].branch()
2827 branch = self[None].branch()
2815 branches = self.branchmap()
2828 branches = self.branchmap()
2816 if not branches.hasbranch(branch):
2829 if not branches.hasbranch(branch):
2817 return []
2830 return []
2818 # the cache returns heads ordered lowest to highest
2831 # the cache returns heads ordered lowest to highest
2819 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2832 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2820 if start is not None:
2833 if start is not None:
2821 # filter out the heads that cannot be reached from startrev
2834 # filter out the heads that cannot be reached from startrev
2822 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2835 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2823 bheads = [h for h in bheads if h in fbheads]
2836 bheads = [h for h in bheads if h in fbheads]
2824 return bheads
2837 return bheads
2825
2838
2826 def branches(self, nodes):
2839 def branches(self, nodes):
2827 if not nodes:
2840 if not nodes:
2828 nodes = [self.changelog.tip()]
2841 nodes = [self.changelog.tip()]
2829 b = []
2842 b = []
2830 for n in nodes:
2843 for n in nodes:
2831 t = n
2844 t = n
2832 while True:
2845 while True:
2833 p = self.changelog.parents(n)
2846 p = self.changelog.parents(n)
2834 if p[1] != nullid or p[0] == nullid:
2847 if p[1] != nullid or p[0] == nullid:
2835 b.append((t, n, p[0], p[1]))
2848 b.append((t, n, p[0], p[1]))
2836 break
2849 break
2837 n = p[0]
2850 n = p[0]
2838 return b
2851 return b
2839
2852
2840 def between(self, pairs):
2853 def between(self, pairs):
2841 r = []
2854 r = []
2842
2855
2843 for top, bottom in pairs:
2856 for top, bottom in pairs:
2844 n, l, i = top, [], 0
2857 n, l, i = top, [], 0
2845 f = 1
2858 f = 1
2846
2859
2847 while n != bottom and n != nullid:
2860 while n != bottom and n != nullid:
2848 p = self.changelog.parents(n)[0]
2861 p = self.changelog.parents(n)[0]
2849 if i == f:
2862 if i == f:
2850 l.append(n)
2863 l.append(n)
2851 f = f * 2
2864 f = f * 2
2852 n = p
2865 n = p
2853 i += 1
2866 i += 1
2854
2867
2855 r.append(l)
2868 r.append(l)
2856
2869
2857 return r
2870 return r
2858
2871
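# Editorial illustration (not from the source): between() walks first
# parents from each 'top' and keeps the nodes whose distance from the
# top hits the doubling counter f (i == f at offsets 1, 2, 4, 8, ...).
# For a linear chain n9 -> n8 -> ... -> n0:
#
#   between([(n9, n0)]) == [[n8, n7, n5, n1]]
#
# This exponential sampling is what apparently lets the legacy wire
# protocol bisect toward a common ancestor in O(log n) round trips.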
2859 def checkpush(self, pushop):
2872 def checkpush(self, pushop):
2860 """Extensions can override this function if additional checks have
2873 """Extensions can override this function if additional checks have
2861 to be performed before pushing, or call it if they override push
2874 to be performed before pushing, or call it if they override push
2862 command.
2875 command.
2863 """
2876 """
2864
2877
2865 @unfilteredpropertycache
2878 @unfilteredpropertycache
2866 def prepushoutgoinghooks(self):
2879 def prepushoutgoinghooks(self):
2867 """Return a util.hooks object whose hooks are called with a pushop
2880 """Return a util.hooks object whose hooks are called with a pushop
2868 (carrying repo, remote and outgoing) before pushing changesets.
2881 (carrying repo, remote and outgoing) before pushing changesets.
2869 """
2882 """
2870 return util.hooks()
2883 return util.hooks()
2871
2884
2872 def pushkey(self, namespace, key, old, new):
2885 def pushkey(self, namespace, key, old, new):
2873 try:
2886 try:
2874 tr = self.currenttransaction()
2887 tr = self.currenttransaction()
2875 hookargs = {}
2888 hookargs = {}
2876 if tr is not None:
2889 if tr is not None:
2877 hookargs.update(tr.hookargs)
2890 hookargs.update(tr.hookargs)
2878 hookargs = pycompat.strkwargs(hookargs)
2891 hookargs = pycompat.strkwargs(hookargs)
2879 hookargs[r'namespace'] = namespace
2892 hookargs[r'namespace'] = namespace
2880 hookargs[r'key'] = key
2893 hookargs[r'key'] = key
2881 hookargs[r'old'] = old
2894 hookargs[r'old'] = old
2882 hookargs[r'new'] = new
2895 hookargs[r'new'] = new
2883 self.hook('prepushkey', throw=True, **hookargs)
2896 self.hook('prepushkey', throw=True, **hookargs)
2884 except error.HookAbort as exc:
2897 except error.HookAbort as exc:
2885 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2898 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2886 if exc.hint:
2899 if exc.hint:
2887 self.ui.write_err(_("(%s)\n") % exc.hint)
2900 self.ui.write_err(_("(%s)\n") % exc.hint)
2888 return False
2901 return False
2889 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2902 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2890 ret = pushkey.push(self, namespace, key, old, new)
2903 ret = pushkey.push(self, namespace, key, old, new)
2891 def runhook():
2904 def runhook():
2892 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2905 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2893 ret=ret)
2906 ret=ret)
2894 self._afterlock(runhook)
2907 self._afterlock(runhook)
2895 return ret
2908 return ret
2896
2909
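# A hedged sketch of vetoing pushkey updates via the 'prepushkey' hook
# fired above; the module path in the hgrc line is invented.
#
#   [hooks]
#   prepushkey.denybookmarks = python:myhooks.denybookmarks

def denybookmarks(ui, repo, namespace=None, key=None, old=None, new=None,
                  **kwargs):
    if namespace == b'bookmarks':
        ui.warn(b'bookmark pushes are disabled on this server\n')
        # a truthy return fails the hook; since the hook is fired with
        # throw=True, the machinery raises HookAbort, handled above
        return True
    return False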
2897 def listkeys(self, namespace):
2910 def listkeys(self, namespace):
2898 self.hook('prelistkeys', throw=True, namespace=namespace)
2911 self.hook('prelistkeys', throw=True, namespace=namespace)
2899 self.ui.debug('listing keys for "%s"\n' % namespace)
2912 self.ui.debug('listing keys for "%s"\n' % namespace)
2900 values = pushkey.list(self, namespace)
2913 values = pushkey.list(self, namespace)
2901 self.hook('listkeys', namespace=namespace, values=values)
2914 self.hook('listkeys', namespace=namespace, values=values)
2902 return values
2915 return values
2903
2916
2904 def debugwireargs(self, one, two, three=None, four=None, five=None):
2917 def debugwireargs(self, one, two, three=None, four=None, five=None):
2905 '''used to test argument passing over the wire'''
2918 '''used to test argument passing over the wire'''
2906 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2919 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2907 pycompat.bytestr(four),
2920 pycompat.bytestr(four),
2908 pycompat.bytestr(five))
2921 pycompat.bytestr(five))
2909
2922
2910 def savecommitmessage(self, text):
2923 def savecommitmessage(self, text):
2911 fp = self.vfs('last-message.txt', 'wb')
2924 fp = self.vfs('last-message.txt', 'wb')
2912 try:
2925 try:
2913 fp.write(text)
2926 fp.write(text)
2914 finally:
2927 finally:
2915 fp.close()
2928 fp.close()
2916 return self.pathto(fp.name[len(self.root) + 1:])
2929 return self.pathto(fp.name[len(self.root) + 1:])
2917
2930
2918 # used to avoid circular references so destructors work
2931 # used to avoid circular references so destructors work
2919 def aftertrans(files):
2932 def aftertrans(files):
2920 renamefiles = [tuple(t) for t in files]
2933 renamefiles = [tuple(t) for t in files]
2921 def a():
2934 def a():
2922 for vfs, src, dest in renamefiles:
2935 for vfs, src, dest in renamefiles:
2923 # if src and dest refer to the same file, vfs.rename is a no-op,
2936 # if src and dest refer to the same file, vfs.rename is a no-op,
2924 # leaving both src and dest on disk. delete dest to make sure
2937 # leaving both src and dest on disk. delete dest to make sure
2925 # the rename cannot be such a no-op.
2938 # the rename cannot be such a no-op.
2926 vfs.tryunlink(dest)
2939 vfs.tryunlink(dest)
2927 try:
2940 try:
2928 vfs.rename(src, dest)
2941 vfs.rename(src, dest)
2929 except OSError: # journal file does not yet exist
2942 except OSError: # journal file does not yet exist
2930 pass
2943 pass
2931 return a
2944 return a
2932
2945
2933 def undoname(fn):
2946 def undoname(fn):
2934 base, name = os.path.split(fn)
2947 base, name = os.path.split(fn)
2935 assert name.startswith('journal')
2948 assert name.startswith('journal')
2936 return os.path.join(base, name.replace('journal', 'undo', 1))
2949 return os.path.join(base, name.replace('journal', 'undo', 1))
2937
2950
2938 def instance(ui, path, create, intents=None, createopts=None):
2951 def instance(ui, path, create, intents=None, createopts=None):
2939 localpath = util.urllocalpath(path)
2952 localpath = util.urllocalpath(path)
2940 if create:
2953 if create:
2941 createrepository(ui, localpath, createopts=createopts)
2954 createrepository(ui, localpath, createopts=createopts)
2942
2955
2943 return makelocalrepository(ui, localpath, intents=intents)
2956 return makelocalrepository(ui, localpath, intents=intents)
2944
2957
2945 def islocal(path):
2958 def islocal(path):
2946 return True
2959 return True
2947
2960
2948 def defaultcreateopts(ui, createopts=None):
2961 def defaultcreateopts(ui, createopts=None):
2949 """Populate the default creation options for a repository.
2962 """Populate the default creation options for a repository.
2950
2963
2951 A dictionary of explicitly requested creation options can be passed
2964 A dictionary of explicitly requested creation options can be passed
2952 in. Missing keys will be populated.
2965 in. Missing keys will be populated.
2953 """
2966 """
2954 createopts = dict(createopts or {})
2967 createopts = dict(createopts or {})
2955
2968
2956 if 'backend' not in createopts:
2969 if 'backend' not in createopts:
2957 # experimental config: storage.new-repo-backend
2970 # experimental config: storage.new-repo-backend
2958 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2971 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2959
2972
2960 return createopts
2973 return createopts
2961
2974
2962 def newreporequirements(ui, createopts):
2975 def newreporequirements(ui, createopts):
2963 """Determine the set of requirements for a new local repository.
2976 """Determine the set of requirements for a new local repository.
2964
2977
2965 Extensions can wrap this function to specify custom requirements for
2978 Extensions can wrap this function to specify custom requirements for
2966 new repositories.
2979 new repositories.
2967 """
2980 """
2968 # If the repo is being created from a shared repository, we copy
2981 # If the repo is being created from a shared repository, we copy
2969 # its requirements.
2982 # its requirements.
2970 if 'sharedrepo' in createopts:
2983 if 'sharedrepo' in createopts:
2971 requirements = set(createopts['sharedrepo'].requirements)
2984 requirements = set(createopts['sharedrepo'].requirements)
2972 if createopts.get('sharedrelative'):
2985 if createopts.get('sharedrelative'):
2973 requirements.add('relshared')
2986 requirements.add('relshared')
2974 else:
2987 else:
2975 requirements.add('shared')
2988 requirements.add('shared')
2976
2989
2977 return requirements
2990 return requirements
2978
2991
2979 if 'backend' not in createopts:
2992 if 'backend' not in createopts:
2980 raise error.ProgrammingError('backend key not present in createopts; '
2993 raise error.ProgrammingError('backend key not present in createopts; '
2981 'was defaultcreateopts() called?')
2994 'was defaultcreateopts() called?')
2982
2995
2983 if createopts['backend'] != 'revlogv1':
2996 if createopts['backend'] != 'revlogv1':
2984 raise error.Abort(_('unable to determine repository requirements for '
2997 raise error.Abort(_('unable to determine repository requirements for '
2985 'storage backend: %s') % createopts['backend'])
2998 'storage backend: %s') % createopts['backend'])
2986
2999
2987 requirements = {'revlogv1'}
3000 requirements = {'revlogv1'}
2988 if ui.configbool('format', 'usestore'):
3001 if ui.configbool('format', 'usestore'):
2989 requirements.add('store')
3002 requirements.add('store')
2990 if ui.configbool('format', 'usefncache'):
3003 if ui.configbool('format', 'usefncache'):
2991 requirements.add('fncache')
3004 requirements.add('fncache')
2992 if ui.configbool('format', 'dotencode'):
3005 if ui.configbool('format', 'dotencode'):
2993 requirements.add('dotencode')
3006 requirements.add('dotencode')
2994
3007
2995 compengine = ui.config('format', 'revlog-compression')
3008 compengine = ui.config('format', 'revlog-compression')
2996 if compengine not in util.compengines:
3009 if compengine not in util.compengines:
2997 raise error.Abort(_('compression engine %s defined by '
3010 raise error.Abort(_('compression engine %s defined by '
2998 'format.revlog-compression not available') %
3011 'format.revlog-compression not available') %
2999 compengine,
3012 compengine,
3000 hint=_('run "hg debuginstall" to list available '
3013 hint=_('run "hg debuginstall" to list available '
3001 'compression engines'))
3014 'compression engines'))
3002
3015
3003 # zlib is the historical default and doesn't need an explicit requirement.
3016 # zlib is the historical default and doesn't need an explicit requirement.
3004 elif compengine == 'zstd':
3017 elif compengine == 'zstd':
3005 requirements.add('revlog-compression-zstd')
3018 requirements.add('revlog-compression-zstd')
3006 elif compengine != 'zlib':
3019 elif compengine != 'zlib':
3007 requirements.add('exp-compression-%s' % compengine)
3020 requirements.add('exp-compression-%s' % compengine)
3008
3021
3009 if scmutil.gdinitconfig(ui):
3022 if scmutil.gdinitconfig(ui):
3010 requirements.add('generaldelta')
3023 requirements.add('generaldelta')
3011 if ui.configbool('format', 'sparse-revlog'):
3024 if ui.configbool('format', 'sparse-revlog'):
3012 requirements.add(SPARSEREVLOG_REQUIREMENT)
3025 requirements.add(SPARSEREVLOG_REQUIREMENT)
3013 if ui.configbool('experimental', 'treemanifest'):
3026 if ui.configbool('experimental', 'treemanifest'):
3014 requirements.add('treemanifest')
3027 requirements.add('treemanifest')
3015
3028
3016 revlogv2 = ui.config('experimental', 'revlogv2')
3029 revlogv2 = ui.config('experimental', 'revlogv2')
3017 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3030 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3018 requirements.remove('revlogv1')
3031 requirements.remove('revlogv1')
3019 # generaldelta is implied by revlogv2.
3032 # generaldelta is implied by revlogv2.
3020 requirements.discard('generaldelta')
3033 requirements.discard('generaldelta')
3021 requirements.add(REVLOGV2_REQUIREMENT)
3034 requirements.add(REVLOGV2_REQUIREMENT)
3022 # experimental config: format.internal-phase
3035 # experimental config: format.internal-phase
3023 if ui.configbool('format', 'internal-phase'):
3036 if ui.configbool('format', 'internal-phase'):
3024 requirements.add('internal-phase')
3037 requirements.add('internal-phase')
3025
3038
3026 if createopts.get('narrowfiles'):
3039 if createopts.get('narrowfiles'):
3027 requirements.add(repository.NARROW_REQUIREMENT)
3040 requirements.add(repository.NARROW_REQUIREMENT)
3028
3041
3029 if createopts.get('lfs'):
3042 if createopts.get('lfs'):
3030 requirements.add('lfs')
3043 requirements.add('lfs')
3031
3044
3032 if ui.configbool('format', 'bookmarks-in-store'):
3045 if ui.configbool('format', 'bookmarks-in-store'):
3033 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3046 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3034
3047
3035 return requirements
3048 return requirements
3036
3049
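# A hedged sketch of an extension wrapping newreporequirements(), as the
# docstring above invites; 'exp-myfeature' and the config knob are
# invented names.

from mercurial import extensions, localrepo

def _newreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    if ui.configbool(b'myext', b'enable-myfeature'):
        requirements.add(b'exp-myfeature')
    return requirements

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)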
3037 def filterknowncreateopts(ui, createopts):
3050 def filterknowncreateopts(ui, createopts):
3038 """Filters a dict of repo creation options against options that are known.
3051 """Filters a dict of repo creation options against options that are known.
3039
3052
3040 Receives a dict of repo creation options and returns a dict of those
3053 Receives a dict of repo creation options and returns a dict of those
3041 options that we don't know how to handle.
3054 options that we don't know how to handle.
3042
3055
3043 This function is called as part of repository creation. If the
3056 This function is called as part of repository creation. If the
3044 returned dict contains any items, repository creation will not
3057 returned dict contains any items, repository creation will not
3045 be allowed, as it means there was a request to create a repository
3058 be allowed, as it means there was a request to create a repository
3046 with options not recognized by loaded code.
3059 with options not recognized by loaded code.
3047
3060
3048 Extensions can wrap this function to filter out creation options
3061 Extensions can wrap this function to filter out creation options
3049 they know how to handle.
3062 they know how to handle.
3050 """
3063 """
3051 known = {
3064 known = {
3052 'backend',
3065 'backend',
3053 'lfs',
3066 'lfs',
3054 'narrowfiles',
3067 'narrowfiles',
3055 'sharedrepo',
3068 'sharedrepo',
3056 'sharedrelative',
3069 'sharedrelative',
3057 'shareditems',
3070 'shareditems',
3058 'shallowfilestore',
3071 'shallowfilestore',
3059 }
3072 }
3060
3073
3061 return {k: v for k, v in createopts.items() if k not in known}
3074 return {k: v for k, v in createopts.items() if k not in known}
3062
3075
3063 def createrepository(ui, path, createopts=None):
3076 def createrepository(ui, path, createopts=None):
3064 """Create a new repository in a vfs.
3077 """Create a new repository in a vfs.
3065
3078
3066 ``path`` path to the new repo's working directory.
3079 ``path`` path to the new repo's working directory.
3067 ``createopts`` options for the new repository.
3080 ``createopts`` options for the new repository.
3068
3081
3069 The following keys for ``createopts`` are recognized:
3082 The following keys for ``createopts`` are recognized:
3070
3083
3071 backend
3084 backend
3072 The storage backend to use.
3085 The storage backend to use.
3073 lfs
3086 lfs
3074 Repository will be created with ``lfs`` requirement. The lfs extension
3087 Repository will be created with ``lfs`` requirement. The lfs extension
3075 will automatically be loaded when the repository is accessed.
3088 will automatically be loaded when the repository is accessed.
3076 narrowfiles
3089 narrowfiles
3077 Set up repository to support narrow file storage.
3090 Set up repository to support narrow file storage.
3078 sharedrepo
3091 sharedrepo
3079 Repository object from which storage should be shared.
3092 Repository object from which storage should be shared.
3080 sharedrelative
3093 sharedrelative
3081 Boolean indicating if the path to the shared repo should be
3094 Boolean indicating if the path to the shared repo should be
3082 stored as relative. By default, the pointer to the "parent" repo
3095 stored as relative. By default, the pointer to the "parent" repo
3083 is stored as an absolute path.
3096 is stored as an absolute path.
3084 shareditems
3097 shareditems
3085 Set of items to share to the new repository (in addition to storage).
3098 Set of items to share to the new repository (in addition to storage).
3086 shallowfilestore
3099 shallowfilestore
3087 Indicates that storage for files should be shallow (not all ancestor
3100 Indicates that storage for files should be shallow (not all ancestor
3088 revisions are known).
3101 revisions are known).
3089 """
3102 """
3090 createopts = defaultcreateopts(ui, createopts=createopts)
3103 createopts = defaultcreateopts(ui, createopts=createopts)
3091
3104
3092 unknownopts = filterknowncreateopts(ui, createopts)
3105 unknownopts = filterknowncreateopts(ui, createopts)
3093
3106
3094 if not isinstance(unknownopts, dict):
3107 if not isinstance(unknownopts, dict):
3095 raise error.ProgrammingError('filterknowncreateopts() did not return '
3108 raise error.ProgrammingError('filterknowncreateopts() did not return '
3096 'a dict')
3109 'a dict')
3097
3110
3098 if unknownopts:
3111 if unknownopts:
3099 raise error.Abort(_('unable to create repository because of unknown '
3112 raise error.Abort(_('unable to create repository because of unknown '
3100 'creation option: %s') %
3113 'creation option: %s') %
3101 ', '.join(sorted(unknownopts)),
3114 ', '.join(sorted(unknownopts)),
3102 hint=_('is a required extension not loaded?'))
3115 hint=_('is a required extension not loaded?'))
3103
3116
3104 requirements = newreporequirements(ui, createopts=createopts)
3117 requirements = newreporequirements(ui, createopts=createopts)
3105
3118
3106 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3119 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3107
3120
3108 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3121 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3109 if hgvfs.exists():
3122 if hgvfs.exists():
3110 raise error.RepoError(_('repository %s already exists') % path)
3123 raise error.RepoError(_('repository %s already exists') % path)
3111
3124
3112 if 'sharedrepo' in createopts:
3125 if 'sharedrepo' in createopts:
3113 sharedpath = createopts['sharedrepo'].sharedpath
3126 sharedpath = createopts['sharedrepo'].sharedpath
3114
3127
3115 if createopts.get('sharedrelative'):
3128 if createopts.get('sharedrelative'):
3116 try:
3129 try:
3117 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3130 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3118 except (IOError, ValueError) as e:
3131 except (IOError, ValueError) as e:
3119 # ValueError is raised on Windows if the drive letters differ
3132 # ValueError is raised on Windows if the drive letters differ
3120 # on each path.
3133 # on each path.
3121 raise error.Abort(_('cannot calculate relative path'),
3134 raise error.Abort(_('cannot calculate relative path'),
3122 hint=stringutil.forcebytestr(e))
3135 hint=stringutil.forcebytestr(e))
3123
3136
3124 if not wdirvfs.exists():
3137 if not wdirvfs.exists():
3125 wdirvfs.makedirs()
3138 wdirvfs.makedirs()
3126
3139
3127 hgvfs.makedir(notindexed=True)
3140 hgvfs.makedir(notindexed=True)
3128 if 'sharedrepo' not in createopts:
3141 if 'sharedrepo' not in createopts:
3129 hgvfs.mkdir(b'cache')
3142 hgvfs.mkdir(b'cache')
3130 hgvfs.mkdir(b'wcache')
3143 hgvfs.mkdir(b'wcache')
3131
3144
3132 if b'store' in requirements and 'sharedrepo' not in createopts:
3145 if b'store' in requirements and 'sharedrepo' not in createopts:
3133 hgvfs.mkdir(b'store')
3146 hgvfs.mkdir(b'store')
3134
3147
3135 # We create an invalid changelog outside the store so very old
3148 # We create an invalid changelog outside the store so very old
3136 # Mercurial versions (which didn't know about the requirements
3149 # Mercurial versions (which didn't know about the requirements
3137 # file) encounter an error on reading the changelog. This
3150 # file) encounter an error on reading the changelog. This
3138 # effectively locks out old clients and prevents them from
3151 # effectively locks out old clients and prevents them from
3139 # mucking with a repo in an unknown format.
3152 # mucking with a repo in an unknown format.
3140 #
3153 #
3141 # The revlog header has version 2, which won't be recognized by
3154 # The revlog header has version 2, which won't be recognized by
3142 # such old clients.
3155 # such old clients.
3143 hgvfs.append(b'00changelog.i',
3156 hgvfs.append(b'00changelog.i',
3144 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3157 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3145 b'layout')
3158 b'layout')
3146
3159
3147 scmutil.writerequires(hgvfs, requirements)
3160 scmutil.writerequires(hgvfs, requirements)
3148
3161
3149 # Write out file telling readers where to find the shared store.
3162 # Write out file telling readers where to find the shared store.
3150 if 'sharedrepo' in createopts:
3163 if 'sharedrepo' in createopts:
3151 hgvfs.write(b'sharedpath', sharedpath)
3164 hgvfs.write(b'sharedpath', sharedpath)
3152
3165
3153 if createopts.get('shareditems'):
3166 if createopts.get('shareditems'):
3154 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3167 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3155 hgvfs.write(b'shared', shared)
3168 hgvfs.write(b'shared', shared)
3156
3169
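# A hedged usage sketch of createrepository() with the 'sharedrepo'
# createopts documented above; paths are placeholders.

from mercurial import hg, localrepo, ui as uimod

ui = uimod.ui.load()
parent = hg.repository(ui, b'/path/to/parent')
localrepo.createrepository(ui, b'/path/to/child',
                           createopts={b'sharedrepo': parent,
                                       b'sharedrelative': False})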
3157 def poisonrepository(repo):
3170 def poisonrepository(repo):
3158 """Poison a repository instance so it can no longer be used."""
3171 """Poison a repository instance so it can no longer be used."""
3159 # Perform any cleanup on the instance.
3172 # Perform any cleanup on the instance.
3160 repo.close()
3173 repo.close()
3161
3174
3162 # Our strategy is to replace the type of the object with one that
3175 # Our strategy is to replace the type of the object with one that
3163 # has all attribute lookups result in error.
3176 # has all attribute lookups result in error.
3164 #
3177 #
3165 # But we have to allow the close() method because some constructors
3178 # But we have to allow the close() method because some constructors
3166 # of repos call close() on repo references.
3179 # of repos call close() on repo references.
3167 class poisonedrepository(object):
3180 class poisonedrepository(object):
3168 def __getattribute__(self, item):
3181 def __getattribute__(self, item):
3169 if item == r'close':
3182 if item == r'close':
3170 return object.__getattribute__(self, item)
3183 return object.__getattribute__(self, item)
3171
3184
3172 raise error.ProgrammingError('repo instances should not be used '
3185 raise error.ProgrammingError('repo instances should not be used '
3173 'after unshare')
3186 'after unshare')
3174
3187
3175 def close(self):
3188 def close(self):
3176 pass
3189 pass
3177
3190
3178 # We may have a repoview, which intercepts __setattr__. So be sure
3191 # We may have a repoview, which intercepts __setattr__. So be sure
3179 # we operate at the lowest level possible.
3192 # we operate at the lowest level possible.
3180 object.__setattr__(repo, r'__class__', poisonedrepository)
3193 object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,167 +1,183 @@
1
1
2 $ cat >> $HGRCPATH << EOF
2 $ cat >> $HGRCPATH << EOF
3 > [experimental]
3 > [experimental]
4 > copies.write-to=changeset-only
4 > copies.write-to=changeset-only
5 > copies.read-from=changeset-only
5 > copies.read-from=changeset-only
6 > [alias]
6 > [alias]
7 > changesetcopies = log -r . -T 'files: {files}
7 > changesetcopies = log -r . -T 'files: {files}
8 > {extras % "{ifcontains("files", key, "{key}: {value}\n")}"}
8 > {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
9 > {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
9 > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
10 > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
10 > [extensions]
11 > [extensions]
11 > rebase =
12 > rebase =
12 > EOF
13 > EOF
13
14
14 Check that copies are recorded correctly
15 Check that copies are recorded correctly
15
16
16 $ hg init repo
17 $ hg init repo
17 $ cd repo
18 $ cd repo
18 $ echo a > a
19 $ echo a > a
19 $ hg add a
20 $ hg add a
20 $ hg ci -m initial
21 $ hg ci -m initial
21 $ hg cp a b
22 $ hg cp a b
22 $ hg cp a c
23 $ hg cp a c
23 $ hg cp a d
24 $ hg cp a d
24 $ hg ci -m 'copy a to b, c, and d'
25 $ hg ci -m 'copy a to b, c, and d'
25 $ hg changesetcopies
26 $ hg changesetcopies
26 files: b c d
27 files: b c d
28 filesadded: 0\x001\x002 (esc)
29
27 p1copies: b\x00a (esc)
30 p1copies: b\x00a (esc)
28 c\x00a (esc)
31 c\x00a (esc)
29 d\x00a (esc)
32 d\x00a (esc)
30 $ hg showcopies
33 $ hg showcopies
31 a -> b
34 a -> b
32 a -> c
35 a -> c
33 a -> d
36 a -> d
34 $ hg showcopies --config experimental.copies.read-from=compatibility
37 $ hg showcopies --config experimental.copies.read-from=compatibility
35 a -> b
38 a -> b
36 a -> c
39 a -> c
37 a -> d
40 a -> d
38 $ hg showcopies --config experimental.copies.read-from=filelog-only
41 $ hg showcopies --config experimental.copies.read-from=filelog-only
39
42
40 Check that renames are recorded correctly
43 Check that renames are recorded correctly
41
44
42 $ hg mv b b2
45 $ hg mv b b2
43 $ hg ci -m 'rename b to b2'
46 $ hg ci -m 'rename b to b2'
44 $ hg changesetcopies
47 $ hg changesetcopies
45 files: b b2
48 files: b b2
49 filesadded: 1
50 filesremoved: 0
51
46 p1copies: b2\x00b (esc)
52 p1copies: b2\x00b (esc)
47 $ hg showcopies
53 $ hg showcopies
48 b -> b2
54 b -> b2
49
55
50 Rename onto existing file. This should get recorded in the changeset files list and in the extras,
56 Rename onto existing file. This should get recorded in the changeset files list and in the extras,
51 even though there is no filelog entry.
57 even though there is no filelog entry.
52
58
53 $ hg cp b2 c --force
59 $ hg cp b2 c --force
54 $ hg st --copies
60 $ hg st --copies
55 M c
61 M c
56 b2
62 b2
57 $ hg debugindex c
63 $ hg debugindex c
58 rev linkrev nodeid p1 p2
64 rev linkrev nodeid p1 p2
59 0 1 b789fdd96dc2 000000000000 000000000000
65 0 1 b789fdd96dc2 000000000000 000000000000
60 $ hg ci -m 'move b onto d'
66 $ hg ci -m 'move b onto d'
61 $ hg changesetcopies
67 $ hg changesetcopies
62 files: c
68 files: c
69
63 p1copies: c\x00b2 (esc)
70 p1copies: c\x00b2 (esc)
64 $ hg showcopies
71 $ hg showcopies
65 b2 -> c
72 b2 -> c
66 $ hg debugindex c
73 $ hg debugindex c
67 rev linkrev nodeid p1 p2
74 rev linkrev nodeid p1 p2
68 0 1 b789fdd96dc2 000000000000 000000000000
75 0 1 b789fdd96dc2 000000000000 000000000000
69
76
70 Create a merge commit with copying done during merge.
77 Create a merge commit with copying done during merge.
71
78
72 $ hg co 0
79 $ hg co 0
73 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
80 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
74 $ hg cp a e
81 $ hg cp a e
75 $ hg cp a f
82 $ hg cp a f
76 $ hg ci -m 'copy a to e and f'
83 $ hg ci -m 'copy a to e and f'
77 created new head
84 created new head
78 $ hg merge 3
85 $ hg merge 3
79 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
80 (branch merge, don't forget to commit)
87 (branch merge, don't forget to commit)
81 File 'a' exists on both sides, so 'g' could be recorded as being from p1 or p2, but we currently
88 File 'a' exists on both sides, so 'g' could be recorded as being from p1 or p2, but we currently
82 always record it as being from p1
89 always record it as being from p1
83 $ hg cp a g
90 $ hg cp a g
84 File 'd' exists only in p2, so 'h' should be from p2
91 File 'd' exists only in p2, so 'h' should be from p2
85 $ hg cp d h
92 $ hg cp d h
86 File 'f' exists only in p1, so 'i' should be from p1
93 File 'f' exists only in p1, so 'i' should be from p1
87 $ hg cp f i
94 $ hg cp f i
88 $ hg ci -m 'merge'
95 $ hg ci -m 'merge'
89 $ hg changesetcopies
96 $ hg changesetcopies
90 files: g h i
97 files: g h i
98 filesadded: 0\x001\x002 (esc)
99
91 p1copies: g\x00a (esc)
100 p1copies: g\x00a (esc)
92 i\x00f (esc)
101 i\x00f (esc)
93 p2copies: h\x00d (esc)
102 p2copies: h\x00d (esc)
94 $ hg showcopies
103 $ hg showcopies
95 a -> g
104 a -> g
96 d -> h
105 d -> h
97 f -> i
106 f -> i
98
107
99 Test writing to both changeset and filelog
108 Test writing to both changeset and filelog
100
109
101 $ hg cp a j
110 $ hg cp a j
102 $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
111 $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
103 $ hg changesetcopies
112 $ hg changesetcopies
104 files: j
113 files: j
114 filesadded: 0
115 filesremoved:
116
105 p1copies: j\x00a (esc)
117 p1copies: j\x00a (esc)
106 p2copies:
118 p2copies:
107 $ hg debugdata j 0
119 $ hg debugdata j 0
108 \x01 (esc)
120 \x01 (esc)
109 copy: a
121 copy: a
110 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
122 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
111 \x01 (esc)
123 \x01 (esc)
112 a
124 a
113 $ hg showcopies
125 $ hg showcopies
114 a -> j
126 a -> j
115 $ hg showcopies --config experimental.copies.read-from=compatibility
127 $ hg showcopies --config experimental.copies.read-from=compatibility
116 a -> j
128 a -> j
117 $ hg showcopies --config experimental.copies.read-from=filelog-only
129 $ hg showcopies --config experimental.copies.read-from=filelog-only
118 a -> j
130 a -> j
119 The entries should be written to extras even if they're empty (so the client
131 The entries should be written to extras even if they're empty (so the client
120 won't have to fall back to reading from filelogs)
132 won't have to fall back to reading from filelogs)
121 $ echo x >> j
133 $ echo x >> j
122 $ hg ci -m 'modify j' --config experimental.copies.write-to=compatibility
134 $ hg ci -m 'modify j' --config experimental.copies.write-to=compatibility
123 $ hg changesetcopies
135 $ hg changesetcopies
124 files: j
136 files: j
137 filesadded:
138 filesremoved:
139
125 p1copies:
140 p1copies:
126 p2copies:
141 p2copies:
127
142
128 Test writing only to filelog
143 Test writing only to filelog
129
144
130 $ hg cp a k
145 $ hg cp a k
131 $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
146 $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
132 $ hg changesetcopies
147 $ hg changesetcopies
133 files: k
148 files: k
149
134 $ hg debugdata k 0
150 $ hg debugdata k 0
135 \x01 (esc)
151 \x01 (esc)
136 copy: a
152 copy: a
137 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
153 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
138 \x01 (esc)
154 \x01 (esc)
139 a
155 a
140 $ hg showcopies
156 $ hg showcopies
141 $ hg showcopies --config experimental.copies.read-from=compatibility
157 $ hg showcopies --config experimental.copies.read-from=compatibility
142 a -> k
158 a -> k
143 $ hg showcopies --config experimental.copies.read-from=filelog-only
159 $ hg showcopies --config experimental.copies.read-from=filelog-only
144 a -> k
160 a -> k
145
161
146 $ cd ..
162 $ cd ..
147
163
148 Test rebasing a commit with copy information
164 Test rebasing a commit with copy information
149
165
150 $ hg init rebase-rename
166 $ hg init rebase-rename
151 $ cd rebase-rename
167 $ cd rebase-rename
152 $ echo a > a
168 $ echo a > a
153 $ hg ci -Aqm 'add a'
169 $ hg ci -Aqm 'add a'
154 $ echo a2 > a
170 $ echo a2 > a
155 $ hg ci -m 'modify a'
171 $ hg ci -m 'modify a'
156 $ hg co -q 0
172 $ hg co -q 0
157 $ hg mv a b
173 $ hg mv a b
158 $ hg ci -qm 'rename a to b'
174 $ hg ci -qm 'rename a to b'
159 $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
175 $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
160 rebasing 2:55d0b405c1b2 "rename a to b" (tip)
176 rebasing 2:acfc33f3aa6d "rename a to b" (tip)
161 merging a and b to b
177 merging a and b to b
162 saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/55d0b405c1b2-78df867e-rebase.hg
178 saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/acfc33f3aa6d-81d0180d-rebase.hg
163 $ hg st --change . --copies
179 $ hg st --change . --copies
164 A b
180 A b
165 a
181 a
166 R a
182 R a
167 $ cd ..
183 $ cd ..
@@ -1,644 +1,647 @@
1 #testcases filelog compatibility changeset
1 #testcases filelog compatibility changeset
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > rebase=
5 > rebase=
6 > [alias]
6 > [alias]
7 > l = log -G -T '{rev} {desc}\n{files}\n'
7 > l = log -G -T '{rev} {desc}\n{files}\n'
8 > EOF
8 > EOF
9
9
10 #if compatibility
10 #if compatibility
11 $ cat >> $HGRCPATH << EOF
11 $ cat >> $HGRCPATH << EOF
12 > [experimental]
12 > [experimental]
13 > copies.read-from = compatibility
13 > copies.read-from = compatibility
14 > EOF
14 > EOF
15 #endif
15 #endif
16
16
17 #if changeset
17 #if changeset
18 $ cat >> $HGRCPATH << EOF
18 $ cat >> $HGRCPATH << EOF
19 > [experimental]
19 > [experimental]
20 > copies.read-from = changeset-only
20 > copies.read-from = changeset-only
21 > copies.write-to = changeset-only
21 > copies.write-to = changeset-only
22 > EOF
22 > EOF
23 #endif
23 #endif
24
24
25 $ REPONUM=0
25 $ REPONUM=0
26 $ newrepo() {
26 $ newrepo() {
27 > cd $TESTTMP
27 > cd $TESTTMP
28 > REPONUM=`expr $REPONUM + 1`
28 > REPONUM=`expr $REPONUM + 1`
29 > hg init repo-$REPONUM
29 > hg init repo-$REPONUM
30 > cd repo-$REPONUM
30 > cd repo-$REPONUM
31 > }
31 > }
32
32
33 Simple rename case
33 Simple rename case
34 $ newrepo
34 $ newrepo
35 $ echo x > x
35 $ echo x > x
36 $ hg ci -Aqm 'add x'
36 $ hg ci -Aqm 'add x'
37 $ hg mv x y
37 $ hg mv x y
38 $ hg debugp1copies
38 $ hg debugp1copies
39 x -> y
39 x -> y
40 $ hg debugp2copies
40 $ hg debugp2copies
41 $ hg ci -m 'rename x to y'
41 $ hg ci -m 'rename x to y'
42 $ hg l
42 $ hg l
43 @ 1 rename x to y
43 @ 1 rename x to y
44 | x y
44 | x y
45 o 0 add x
45 o 0 add x
46 x
46 x
47 $ hg debugp1copies -r 1
47 $ hg debugp1copies -r 1
48 x -> y
48 x -> y
49 $ hg debugpathcopies 0 1
49 $ hg debugpathcopies 0 1
50 x -> y
50 x -> y
51 $ hg debugpathcopies 1 0
51 $ hg debugpathcopies 1 0
52 y -> x
52 y -> x
53 Test filtering copies by path. We do filtering by destination.
53 Test filtering copies by path. We do filtering by destination.
54 $ hg debugpathcopies 0 1 x
54 $ hg debugpathcopies 0 1 x
55 $ hg debugpathcopies 1 0 x
55 $ hg debugpathcopies 1 0 x
56 y -> x
56 y -> x
57 $ hg debugpathcopies 0 1 y
57 $ hg debugpathcopies 0 1 y
58 x -> y
58 x -> y
59 $ hg debugpathcopies 1 0 y
59 $ hg debugpathcopies 1 0 y
60
60
61 Copy a file onto another file
61 Copy a file onto another file
62 $ newrepo
62 $ newrepo
63 $ echo x > x
63 $ echo x > x
64 $ echo y > y
64 $ echo y > y
65 $ hg ci -Aqm 'add x and y'
65 $ hg ci -Aqm 'add x and y'
66 $ hg cp -f x y
66 $ hg cp -f x y
67 $ hg debugp1copies
67 $ hg debugp1copies
68 x -> y
68 x -> y
69 $ hg debugp2copies
69 $ hg debugp2copies
70 $ hg ci -m 'copy x onto y'
70 $ hg ci -m 'copy x onto y'
71 $ hg l
71 $ hg l
72 @ 1 copy x onto y
72 @ 1 copy x onto y
73 | y
73 | y
74 o 0 add x and y
74 o 0 add x and y
75 x y
75 x y
76 $ hg debugp1copies -r 1
76 $ hg debugp1copies -r 1
77 x -> y
77 x -> y
78 Incorrectly doesn't show the rename
78 Incorrectly doesn't show the rename
79 $ hg debugpathcopies 0 1
79 $ hg debugpathcopies 0 1
80
80
81 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
81 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
82 produce a new filelog entry. The changeset's "files" entry should still list the file.
82 produce a new filelog entry. The changeset's "files" entry should still list the file.
83 $ newrepo
83 $ newrepo
84 $ echo x > x
84 $ echo x > x
85 $ echo x > x2
85 $ echo x > x2
86 $ hg ci -Aqm 'add x and x2 with same content'
86 $ hg ci -Aqm 'add x and x2 with same content'
87 $ hg cp -f x x2
87 $ hg cp -f x x2
88 $ hg ci -m 'copy x onto x2'
88 $ hg ci -m 'copy x onto x2'
89 $ hg l
89 $ hg l
90 @ 1 copy x onto x2
90 @ 1 copy x onto x2
91 | x2
91 | x2
92 o 0 add x and x2 with same content
92 o 0 add x and x2 with same content
93 x x2
93 x x2
94 $ hg debugp1copies -r 1
94 $ hg debugp1copies -r 1
95 x -> x2
95 x -> x2
96 Incorrectly doesn't show the rename
96 Incorrectly doesn't show the rename
97 $ hg debugpathcopies 0 1
97 $ hg debugpathcopies 0 1
98
98
99 Copy a file, then delete destination, then copy again. This does not create a new filelog entry.
99 Copy a file, then delete destination, then copy again. This does not create a new filelog entry.
100 $ newrepo
100 $ newrepo
101 $ echo x > x
101 $ echo x > x
102 $ hg ci -Aqm 'add x'
102 $ hg ci -Aqm 'add x'
103 $ hg cp x y
103 $ hg cp x y
104 $ hg ci -m 'copy x to y'
104 $ hg ci -m 'copy x to y'
105 $ hg rm y
105 $ hg rm y
106 $ hg ci -m 'remove y'
106 $ hg ci -m 'remove y'
107 $ hg cp -f x y
107 $ hg cp -f x y
108 $ hg ci -m 'copy x onto y (again)'
108 $ hg ci -m 'copy x onto y (again)'
109 $ hg l
109 $ hg l
110 @ 3 copy x onto y (again)
110 @ 3 copy x onto y (again)
111 | y
111 | y
112 o 2 remove y
112 o 2 remove y
113 | y
113 | y
114 o 1 copy x to y
114 o 1 copy x to y
115 | y
115 | y
116 o 0 add x
116 o 0 add x
117 x
117 x
118 $ hg debugp1copies -r 3
118 $ hg debugp1copies -r 3
119 x -> y
119 x -> y
120 $ hg debugpathcopies 0 3
120 $ hg debugpathcopies 0 3
121 x -> y
121 x -> y
122
122
123 Rename file in a loop: x->y->z->x
123 Rename file in a loop: x->y->z->x
124 $ newrepo
124 $ newrepo
125 $ echo x > x
125 $ echo x > x
126 $ hg ci -Aqm 'add x'
126 $ hg ci -Aqm 'add x'
127 $ hg mv x y
127 $ hg mv x y
128 $ hg debugp1copies
128 $ hg debugp1copies
129 x -> y
129 x -> y
130 $ hg debugp2copies
130 $ hg debugp2copies
131 $ hg ci -m 'rename x to y'
131 $ hg ci -m 'rename x to y'
132 $ hg mv y z
132 $ hg mv y z
133 $ hg ci -m 'rename y to z'
133 $ hg ci -m 'rename y to z'
134 $ hg mv z x
134 $ hg mv z x
135 $ hg ci -m 'rename z to x'
135 $ hg ci -m 'rename z to x'
136 $ hg l
136 $ hg l
137 @ 3 rename z to x
137 @ 3 rename z to x
138 | x z
138 | x z
139 o 2 rename y to z
139 o 2 rename y to z
140 | y z
140 | y z
141 o 1 rename x to y
141 o 1 rename x to y
142 | x y
142 | x y
143 o 0 add x
143 o 0 add x
144 x
144 x
145 $ hg debugpathcopies 0 3
145 $ hg debugpathcopies 0 3
146
146
147 Copy x to y, then remove y, then add back y. With copy metadata in the changeset, this could easily
147 Copy x to y, then remove y, then add back y. With copy metadata in the changeset, this could easily
148 end up reporting y as copied from x (if we don't unmark it as a copy when it's removed).
148 end up reporting y as copied from x (if we don't unmark it as a copy when it's removed).
149 $ newrepo
149 $ newrepo
150 $ echo x > x
150 $ echo x > x
151 $ hg ci -Aqm 'add x'
151 $ hg ci -Aqm 'add x'
152 $ hg mv x y
152 $ hg mv x y
153 $ hg ci -m 'rename x to y'
153 $ hg ci -m 'rename x to y'
154 $ hg rm y
154 $ hg rm y
155 $ hg ci -qm 'remove y'
155 $ hg ci -qm 'remove y'
156 $ echo x > y
156 $ echo x > y
157 $ hg ci -Aqm 'add back y'
157 $ hg ci -Aqm 'add back y'
158 $ hg l
158 $ hg l
159 @ 3 add back y
159 @ 3 add back y
160 | y
160 | y
161 o 2 remove y
161 o 2 remove y
162 | y
162 | y
163 o 1 rename x to y
163 o 1 rename x to y
164 | x y
164 | x y
165 o 0 add x
165 o 0 add x
166 x
166 x
167 $ hg debugp1copies -r 3
167 $ hg debugp1copies -r 3
168 $ hg debugpathcopies 0 3
168 $ hg debugpathcopies 0 3
169
169
170 Copy x to z, then remove z, then copy x2 (same content as x) to z. With copy metadata in the
170 Copy x to z, then remove z, then copy x2 (same content as x) to z. With copy metadata in the
171 changeset, the two copies here will have the same filelog entry, so ctx['z'].introrev() might point
171 changeset, the two copies here will have the same filelog entry, so ctx['z'].introrev() might point
172 to the first commit that added the file. We should still report the copy as being from x2.
172 to the first commit that added the file. We should still report the copy as being from x2.
173 $ newrepo
173 $ newrepo
174 $ echo x > x
174 $ echo x > x
175 $ echo x > x2
175 $ echo x > x2
176 $ hg ci -Aqm 'add x and x2 with same content'
176 $ hg ci -Aqm 'add x and x2 with same content'
177 $ hg cp x z
177 $ hg cp x z
178 $ hg ci -qm 'copy x to z'
178 $ hg ci -qm 'copy x to z'
179 $ hg rm z
179 $ hg rm z
180 $ hg ci -m 'remove z'
180 $ hg ci -m 'remove z'
181 $ hg cp x2 z
181 $ hg cp x2 z
182 $ hg ci -m 'copy x2 to z'
182 $ hg ci -m 'copy x2 to z'
183 $ hg l
183 $ hg l
184 @ 3 copy x2 to z
184 @ 3 copy x2 to z
185 | z
185 | z
186 o 2 remove z
186 o 2 remove z
187 | z
187 | z
188 o 1 copy x to z
188 o 1 copy x to z
189 | z
189 | z
190 o 0 add x and x2 with same content
190 o 0 add x and x2 with same content
191 x x2
191 x x2
192 $ hg debugp1copies -r 3
192 $ hg debugp1copies -r 3
193 x2 -> z
193 x2 -> z
194 $ hg debugpathcopies 0 3
194 $ hg debugpathcopies 0 3
195 x2 -> z
195 x2 -> z
196
196
197 Create x and y, then rename them both to the same name, but on different sides of a fork
197 Create x and y, then rename them both to the same name, but on different sides of a fork
198 $ newrepo
198 $ newrepo
199 $ echo x > x
199 $ echo x > x
200 $ echo y > y
200 $ echo y > y
201 $ hg ci -Aqm 'add x and y'
201 $ hg ci -Aqm 'add x and y'
202 $ hg mv x z
202 $ hg mv x z
203 $ hg ci -qm 'rename x to z'
203 $ hg ci -qm 'rename x to z'
204 $ hg co -q 0
204 $ hg co -q 0
205 $ hg mv y z
205 $ hg mv y z
206 $ hg ci -qm 'rename y to z'
206 $ hg ci -qm 'rename y to z'
207 $ hg l
207 $ hg l
208 @ 2 rename y to z
208 @ 2 rename y to z
209 | y z
209 | y z
210 | o 1 rename x to z
210 | o 1 rename x to z
211 |/ x z
211 |/ x z
212 o 0 add x and y
212 o 0 add x and y
213 x y
213 x y
214 $ hg debugpathcopies 1 2
214 $ hg debugpathcopies 1 2
215 z -> x
215 z -> x
216 y -> z
216 y -> z
217
217
218 Fork renames x to y on one side and removes x on the other
218 Fork renames x to y on one side and removes x on the other
219 $ newrepo
219 $ newrepo
220 $ echo x > x
220 $ echo x > x
221 $ hg ci -Aqm 'add x'
221 $ hg ci -Aqm 'add x'
222 $ hg mv x y
222 $ hg mv x y
223 $ hg ci -m 'rename x to y'
223 $ hg ci -m 'rename x to y'
224 $ hg co -q 0
224 $ hg co -q 0
225 $ hg rm x
225 $ hg rm x
226 $ hg ci -m 'remove x'
226 $ hg ci -m 'remove x'
227 created new head
227 created new head
228 $ hg l
228 $ hg l
229 @ 2 remove x
229 @ 2 remove x
230 | x
230 | x
231 | o 1 rename x to y
231 | o 1 rename x to y
232 |/ x y
232 |/ x y
233 o 0 add x
233 o 0 add x
234 x
234 x
235 $ hg debugpathcopies 1 2
235 $ hg debugpathcopies 1 2
236
236
237 Copies via null revision (there shouldn't be any)
237 Copies via null revision (there shouldn't be any)
238 $ newrepo
238 $ newrepo
239 $ echo x > x
239 $ echo x > x
240 $ hg ci -Aqm 'add x'
240 $ hg ci -Aqm 'add x'
241 $ hg cp x y
241 $ hg cp x y
242 $ hg ci -m 'copy x to y'
242 $ hg ci -m 'copy x to y'
243 $ hg co -q null
243 $ hg co -q null
244 $ echo x > x
244 $ echo x > x
245 $ hg ci -Aqm 'add x (again)'
245 $ hg ci -Aqm 'add x (again)'
246 $ hg l
246 $ hg l
247 @ 2 add x (again)
247 @ 2 add x (again)
248 x
248 x
249 o 1 copy x to y
249 o 1 copy x to y
250 | y
250 | y
251 o 0 add x
251 o 0 add x
252 x
252 x
253 $ hg debugpathcopies 1 2
253 $ hg debugpathcopies 1 2
254 $ hg debugpathcopies 2 1
254 $ hg debugpathcopies 2 1
255
255
256 Merge rename from other branch
256 Merge rename from other branch
257 $ newrepo
257 $ newrepo
258 $ echo x > x
258 $ echo x > x
259 $ hg ci -Aqm 'add x'
259 $ hg ci -Aqm 'add x'
260 $ hg mv x y
260 $ hg mv x y
261 $ hg ci -m 'rename x to y'
261 $ hg ci -m 'rename x to y'
262 $ hg co -q 0
262 $ hg co -q 0
263 $ echo z > z
263 $ echo z > z
264 $ hg ci -Aqm 'add z'
264 $ hg ci -Aqm 'add z'
265 $ hg merge -q 1
265 $ hg merge -q 1
266 $ hg debugp1copies
266 $ hg debugp1copies
267 $ hg debugp2copies
267 $ hg debugp2copies
268 $ hg ci -m 'merge rename from p2'
268 $ hg ci -m 'merge rename from p2'
269 $ hg l
269 $ hg l
270 @ 3 merge rename from p2
270 @ 3 merge rename from p2
271 |\ x
271 |\ x
272 | o 2 add z
272 | o 2 add z
273 | | z
273 | | z
274 o | 1 rename x to y
274 o | 1 rename x to y
275 |/ x y
275 |/ x y
276 o 0 add x
276 o 0 add x
277 x
277 x
278 Perhaps we should indicate the rename here, but `hg status` is documented to be weird during
278 Perhaps we should indicate the rename here, but `hg status` is documented to be weird during
279 merges, so...
279 merges, so...
280 $ hg debugp1copies -r 3
280 $ hg debugp1copies -r 3
281 $ hg debugp2copies -r 3
281 $ hg debugp2copies -r 3
282 $ hg debugpathcopies 0 3
282 $ hg debugpathcopies 0 3
283 x -> y
283 x -> y
284 $ hg debugpathcopies 1 2
284 $ hg debugpathcopies 1 2
285 y -> x
285 y -> x
286 $ hg debugpathcopies 1 3
286 $ hg debugpathcopies 1 3
287 $ hg debugpathcopies 2 3
287 $ hg debugpathcopies 2 3
288 x -> y
288 x -> y
289
289
Copy file from either side in a merge
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg co -q null
  $ echo y > y
  $ hg ci -Aqm 'add y'
  $ hg merge -q 0
  $ hg cp y z
  $ hg debugp1copies
  y -> z
  $ hg debugp2copies
  $ hg ci -m 'copy file from p1 in merge'
  $ hg co -q 1
  $ hg merge -q 0
  $ hg cp x z
  $ hg debugp1copies
  $ hg debugp2copies
  x -> z
  $ hg ci -qm 'copy file from p2 in merge'
  $ hg l
  @ 3 copy file from p2 in merge
  |\ z
  +---o 2 copy file from p1 in merge
  | |/ z
  | o 1 add y
  | y
  o 0 add x
  x
  $ hg debugp1copies -r 2
  y -> z
  $ hg debugp2copies -r 2
  $ hg debugpathcopies 1 2
  y -> z
  $ hg debugpathcopies 0 2
  $ hg debugp1copies -r 3
  $ hg debugp2copies -r 3
  x -> z
  $ hg debugpathcopies 1 3
  $ hg debugpathcopies 0 3
  x -> z

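What debugp1copies/debugp2copies demonstrate above is that a merge commit
carries a separate copy record per parent, and a copy is attributed to the
parent that actually provides the source file. A minimal sketch of that
bookkeeping (the class and names are illustrative, not Mercurial's internals):

  class MergeCommitSketch:
      def __init__(self, p1files, p2files):
          self.p1files, self.p2files = p1files, p2files
          self.p1copies = {}  # {dest: source-in-p1}
          self.p2copies = {}  # {dest: source-in-p2}

      def copy(self, src, dest):
          # record the copy against whichever parent contains the source
          if src in self.p1files:
              self.p1copies[dest] = src
          elif src in self.p2files:
              self.p2copies[dest] = src

  # 'copy file from p2 in merge': x only exists on the second parent's side
  m = MergeCommitSketch(p1files={'y'}, p2files={'x'})
  m.copy('x', 'z')
  print(m.p1copies, m.p2copies)  # {} {'z': 'x'}
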
Copy file that exists on both sides of the merge, same content on both sides
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x on branch 1'
  $ hg co -q null
  $ echo x > x
  $ hg ci -Aqm 'add x on branch 2'
  $ hg merge -q 0
  $ hg cp x z
  $ hg debugp1copies
  x -> z
  $ hg debugp2copies
  $ hg ci -qm 'merge'
  $ hg l
  @ 2 merge
  |\ z
  | o 1 add x on branch 2
  | x
  o 0 add x on branch 1
  x
  $ hg debugp1copies -r 2
  x -> z
  $ hg debugp2copies -r 2
It's a little weird that it shows up on both sides
  $ hg debugpathcopies 1 2
  x -> z
  $ hg debugpathcopies 0 2
  x -> z (filelog !)

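A note on the trailing `(filelog !)` style annotations: this test file runs
under several variants (filelog-based copy storage, changeset-based storage,
and a compatibility mode), and an expected-output line tagged with a feature
name is only required when that feature is active. Roughly, as a simplified
sketch of the test runner's rule (not the actual run-tests.py code):

  def line_expected(line, active_features):
      # "some output (feature !)" is only expected when `feature` is active
      if line.endswith(' !)'):
          feature = line[line.rfind('(') + 1:-3].strip()
          return feature in active_features
      return True

  print(line_expected('x -> z (filelog !)', {'filelog'}))    # True
  print(line_expected('x -> z (filelog !)', {'changeset'}))  # False
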
Copy file that exists on both sides of the merge, different content
  $ newrepo
  $ echo branch1 > x
  $ hg ci -Aqm 'add x on branch 1'
  $ hg co -q null
  $ echo branch2 > x
  $ hg ci -Aqm 'add x on branch 2'
  $ hg merge -q 0
  warning: conflicts while merging x! (edit, then use 'hg resolve --mark')
  [1]
  $ echo resolved > x
  $ hg resolve -m x
  (no more unresolved files)
  $ hg cp x z
  $ hg debugp1copies
  x -> z
  $ hg debugp2copies
  $ hg ci -qm 'merge'
  $ hg l
  @ 2 merge
  |\ x z
  | o 1 add x on branch 2
  | x
  o 0 add x on branch 1
  x
  $ hg debugp1copies -r 2
  x -> z (changeset !)
  $ hg debugp2copies -r 2
  x -> z (no-changeset !)
  $ hg debugpathcopies 1 2
  x -> z (changeset !)
  $ hg debugpathcopies 0 2
  x -> z (no-changeset !)

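The `(changeset !)` lines above exercise the mode where copy metadata is read
from the changeset itself rather than from filelog entries. A hedged sketch of
what storing such metadata in a changeset's extra dict could look like (the
key names and record encoding here are assumptions for illustration, not a
spec of the actual format):

  def encodecopies_sketch(copies):
      # one "dest\0source" record per line, sorted for determinism
      return '\n'.join('%s\0%s' % (dest, copies[dest]) for dest in sorted(copies))

  def store_in_extra(extra, p1copies, p2copies):
      if p1copies:
          extra['p1copies'] = encodecopies_sketch(p1copies)
      if p2copies:
          extra['p2copies'] = encodecopies_sketch(p2copies)
      return extra

  print(store_in_extra({'branch': 'default'}, {'z': 'x'}, {}))
  # {'branch': 'default', 'p1copies': 'z\x00x'}
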
Copy x->y on one side of merge and copy x->z on the other side. Pathcopies from one parent
of the merge to the merge should include the copy from the other side.
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg cp x y
  $ hg ci -qm 'copy x to y'
  $ hg co -q 0
  $ hg cp x z
  $ hg ci -qm 'copy x to z'
  $ hg merge -q 1
  $ hg ci -m 'merge copy x->y and copy x->z'
  $ hg l
  @ 3 merge copy x->y and copy x->z
  |\
  | o 2 copy x to z
  | | z
  o | 1 copy x to y
  |/ y
  o 0 add x
  x
  $ hg debugp1copies -r 3
  $ hg debugp2copies -r 3
  $ hg debugpathcopies 2 3
  x -> y
  $ hg debugpathcopies 1 3
  x -> z

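The two debugpathcopies calls illustrate the rule stated in the description:
going from one parent of the merge to the merge itself, you see what the other
branch copied since the base. A simplified sketch of that rule (hypothetical
helper, assuming the other branch's copies relative to the base are known):

  def parent_to_merge_copies(base_to_other, base_files):
      """Copies from one merge parent to the merge: what the *other* branch
      copied since the base, restricted to sources that existed in the base."""
      return {src: dest for dest, src in base_to_other.items()
              if src in base_files}

  # from rev 2 (copy x to z): the other branch (rev 1) copied x to y
  print(parent_to_merge_copies({'y': 'x'}, {'x'}))  # {'x': 'y'} -> "x -> y"
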
Copy x to y on one side of merge, create y and rename to z on the other side. Pathcopies from the
first side should not include the y->z rename since y didn't exist in the merge base.
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg cp x y
  $ hg ci -qm 'copy x to y'
  $ hg co -q 0
  $ echo y > y
  $ hg ci -Aqm 'add y'
  $ hg mv y z
  $ hg ci -m 'rename y to z'
  $ hg merge -q 1
  $ hg ci -m 'merge'
  $ hg l
  @ 4 merge
  |\
  | o 3 rename y to z
  | | y z
  | o 2 add y
  | | y
  o | 1 copy x to y
  |/ y
  o 0 add x
  x
  $ hg debugp1copies -r 3
  y -> z
  $ hg debugp2copies -r 3
  $ hg debugpathcopies 2 3
  y -> z
  $ hg debugpathcopies 1 3

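The empty `hg debugpathcopies 1 3` output follows from the same rule: reusing
the parent_to_merge_copies sketch from above, the other branch's y -> z rename
is dropped because y did not exist in the merge base:

  # the other branch added y and renamed it to z; the base only contained x
  print(parent_to_merge_copies({'z': 'y'}, {'x'}))  # {} -- nothing to report
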
Create x and y, then rename x to z on one side of merge, and rename y to z and modify z on the
other side.
  $ newrepo
  $ echo x > x
  $ echo y > y
  $ hg ci -Aqm 'add x and y'
  $ hg mv x z
  $ hg ci -qm 'rename x to z'
  $ hg co -q 0
  $ hg mv y z
  $ hg ci -qm 'rename y to z'
  $ echo z >> z
  $ hg ci -m 'modify z'
  $ hg merge -q 1
  warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
  [1]
  $ echo z > z
  $ hg resolve -qm z
  $ hg ci -m 'merge 1 into 3'
Try merging the other direction too
  $ hg co -q 1
  $ hg merge -q 3
  warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
  [1]
  $ echo z > z
  $ hg resolve -qm z
  $ hg ci -m 'merge 3 into 1'
  created new head
  $ hg l
  @ 5 merge 3 into 1
  |\ y z
  +---o 4 merge 1 into 3
  | |/ x z
  | o 3 modify z
  | | z
  | o 2 rename y to z
  | | y z
  o | 1 rename x to z
  |/ x z
  o 0 add x and y
  x y
  $ hg debugpathcopies 1 4
  $ hg debugpathcopies 2 4
  $ hg debugpathcopies 0 4
  x -> z (filelog !)
  y -> z (compatibility !)
  $ hg debugpathcopies 1 5
  $ hg debugpathcopies 2 5
  $ hg debugpathcopies 0 5
  x -> z


Test for a case in the fullcopytracing algorithm where neither of the merging
csets is a descendant of the merge base. This test checks that the algorithm
correctly finds the copies:

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution.createmarkers=True
  > evolution.allowunstable=True
  > EOF

  $ newrepo
  $ echo a > a
  $ hg add a
  $ hg ci -m "added a"
  $ echo b > b
  $ hg add b
  $ hg ci -m "added b"

  $ hg mv b b1
  $ hg ci -m "rename b to b1"

  $ hg up ".^"
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo d > d
  $ hg add d
  $ hg ci -m "added d"
  created new head

  $ echo baba >> b
  $ hg ci --amend -m "added d, modified b"

  $ hg l --hidden
  @ 4 added d, modified b
  | b d
  | x 3 added d
  |/ d
  | o 2 rename b to b1
  |/ b b1
  o 1 added b
  | b
  o 0 added a
  a

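To see why neither merging changeset descends from the base here: the graft
below merges revision 4 against the working parent (revision 2) using the
amended-away revision 3 as the base, and 3 is an ancestor of neither side. A
small self-contained check, with the DAG transcribed from the log above:

  def is_ancestor(a, rev, parents):
      """True if a is reachable by walking parents from rev."""
      seen, stack = set(), [rev]
      while stack:
          r = stack.pop()
          if r == a:
              return True
          if r in seen:
              continue
          seen.add(r)
          stack.extend(parents.get(r, ()))
      return False

  # 0 <- 1; revisions 2, 3 (hidden) and 4 are all children of 1
  parents = {0: (), 1: (0,), 2: (1,), 3: (1,), 4: (1,)}
  print(is_ancestor(3, 2, parents), is_ancestor(3, 4, parents))  # False False
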
Grafting revision 4 on top of revision 2, showing that it respects the rename:

  $ hg up 2 -q
  $ hg graft -r 4 --base 3 --hidden
  grafting 4:af28412ec03c "added d, modified b" (tip) (no-changeset !)
  grafting 4:6325ca0b7a1c "added d, modified b" (tip) (changeset !)
  merging b1 and b to b1

  $ hg l -l1 -p
  @ 5 added d, modified b
  | b1
  ~ diff -r 5a4825cc2926 -r 94a2f1a0e8e2 b1 (no-changeset !)
  ~ diff -r df722b7fe2d5 -r ba3ddbbdfd96 b1 (changeset !)
  --- a/b1 Thu Jan 01 00:00:00 1970 +0000
  +++ b/b1 Thu Jan 01 00:00:00 1970 +0000
  @@ -1,1 +1,2 @@
  b
  +baba

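The `merging b1 and b to b1` line is copy tracing at work: revision 2 renamed
b to b1, so the graft knows revision 4's change to b belongs in b1. A hedged
sketch of that pairing step (the function and its arguments are illustrative):

  def pair_renamed_files(base_files, local_renames, other_changed):
      # match each file changed on the other side with its (possibly renamed)
      # counterpart on the local side, keyed through the base
      pairs = []
      for f in other_changed:
          if f in base_files:
              pairs.append((local_renames.get(f, f), f))
      return pairs

  # base had b; locally b was renamed to b1; the grafted revision changed b
  print(pair_renamed_files({'b'}, {'b': 'b1'}, {'b'}))  # [('b1', 'b')]
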
Test to make sure that the fullcopytracing algorithm doesn't fail when neither of the
merging csets is a descendant of the base.
-------------------------------------------------------------------------------------------------

  $ newrepo
  $ echo a > a
  $ hg add a
  $ hg ci -m "added a"
  $ echo b > b
  $ hg add b
  $ hg ci -m "added b"

  $ echo foobar > willconflict
  $ hg add willconflict
  $ hg ci -m "added willconflict"
  $ echo c > c
  $ hg add c
  $ hg ci -m "added c"

  $ hg l
  @ 3 added c
  | c
  o 2 added willconflict
  | willconflict
  o 1 added b
  | b
  o 0 added a
  a

  $ hg up ".^^"
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo d > d
  $ hg add d
  $ hg ci -m "added d"
  created new head

  $ echo barfoo > willconflict
  $ hg add willconflict
  $ hg ci --amend -m "added willconflict and d"

  $ hg l
  @ 5 added willconflict and d
  | d willconflict
  | o 3 added c
  | | c
  | o 2 added willconflict
  |/ willconflict
  o 1 added b
  | b
  o 0 added a
  a

  $ hg rebase -r . -d 2 -t :other
  rebasing 5:5018b1509e94 "added willconflict and d" (tip) (no-changeset !)
  rebasing 5:619047c26bf8 "added willconflict and d" (tip) (changeset !)

  $ hg up 3 -q
  $ hg l --hidden
  o 6 added willconflict and d
  | d willconflict
  | x 5 added willconflict and d
  | | d willconflict
  | | x 4 added d
  | |/ d
  +---@ 3 added c
  | | c
  o | 2 added willconflict
  |/ willconflict
  o 1 added b
  | b
  o 0 added a
  a

Now if we trigger a merge between revision 3 and 6 using base revision 4,
neither of the merging csets will be a descendant of the base revision:

  $ hg graft -r 6 --base 4 --hidden -t :other
  grafting 6:99802e4f1e46 "added willconflict and d" (tip) (no-changeset !)
  grafting 6:9ddc6fb3b691 "added willconflict and d" (tip) (changeset !)
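
Both the rebase and the graft above pass `-t :other` so the willconflict
conflict resolves non-interactively: the `:other` merge tool takes the
incoming side's version of a conflicting file. Schematically:

  def internal_other(local, base, other):
      # the ':other' tool keeps the other side's content unconditionally
      return other

  print(internal_other('foobar\n', None, 'barfoo\n'))  # 'barfoo\n'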